author     dim <dim@FreeBSD.org>    2014-11-24 09:15:30 +0000
committer  dim <dim@FreeBSD.org>    2014-11-24 09:15:30 +0000
commit     173a4f43a911175643bda81ee675e8d9269056ea
tree       47df2c12b57214af6c31e47404b005675b8b7ffc /include/clang/Lex/Preprocessor.h
parent     88f7a7d5251a2d813460274c92decc143a11569b
Vendor import of clang RELEASE_350/final tag r216957 (effectively, 3.5.0 release):
https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_350/final@216957
Diffstat (limited to 'include/clang/Lex/Preprocessor.h')
-rw-r--r--   include/clang/Lex/Preprocessor.h   662
1 file changed, 363 insertions, 299 deletions
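Most of the churn in the diff below is Doxygen-comment cleanup plus two mechanical C++11 modernizations: llvm::OwningPtr members become std::unique_ptr, and literal 0 pointer constants become nullptr. A minimal sketch of the ownership-transfer pattern that changes, loosely modeled on PushIncludeMacroStack/PopIncludeMacroStack; the names here are illustrative, not the actual clang members:

// Hedged sketch of the OwningPtr -> std::unique_ptr migration pattern
// that runs through this diff. Illustrative names only.
#include <memory>
#include <utility>
#include <vector>

struct Lexer {};  // stand-in for clang::Lexer

struct IncludeStackFrame {
  std::unique_ptr<Lexer> TheLexer;  // 3.5 style: the frame owns the lexer directly
};

// 3.4 style (removed by this import):
//   Stack.push_back(IncludeStackFrame{CurLexer.take()});   // raw pointer handed out
//   CurLexer.reset(Stack.back().TheLexer);                 // raw pointer taken back
//
// 3.5 style (introduced by this import): ownership moves explicitly.
void pushLexer(std::vector<IncludeStackFrame> &Stack,
               std::unique_ptr<Lexer> &CurLexer) {
  Stack.push_back(IncludeStackFrame{std::move(CurLexer)});
}

void popLexer(std::vector<IncludeStackFrame> &Stack,
              std::unique_ptr<Lexer> &CurLexer) {
  CurLexer = std::move(Stack.back().TheLexer);
  Stack.pop_back();
}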
diff --git a/include/clang/Lex/Preprocessor.h b/include/clang/Lex/Preprocessor.h index 223fd47..d4b4ba2 100644 --- a/include/clang/Lex/Preprocessor.h +++ b/include/clang/Lex/Preprocessor.h @@ -6,9 +6,10 @@ // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// -// -// This file defines the Preprocessor interface. -// +/// +/// \file +/// \brief Defines the clang::Preprocessor interface. +/// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LEX_PREPROCESSOR_H @@ -28,10 +29,10 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" -#include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Allocator.h" +#include <memory> #include <vector> namespace llvm { @@ -64,7 +65,7 @@ class TokenValue { IdentifierInfo *II; public: - TokenValue(tok::TokenKind Kind) : Kind(Kind), II(0) { + TokenValue(tok::TokenKind Kind) : Kind(Kind), II(nullptr) { assert(Kind != tok::raw_identifier && "Raw identifiers are not supported."); assert(Kind != tok::identifier && "Identifiers should be created by TokenValue(IdentifierInfo *)"); @@ -78,11 +79,12 @@ public: } }; -/// Preprocessor - This object engages in a tight little dance with the lexer to -/// efficiently preprocess tokens. Lexers know only about tokens within a -/// single source file, and don't know anything about preprocessor-level issues -/// like the \#include stack, token expansion, etc. +/// \brief Engages in a tight little dance with the lexer to efficiently +/// preprocess tokens. /// +/// Lexers know only about tokens within a single source file, and don't +/// know anything about preprocessor-level issues like the \#include stack, +/// token expansion, etc. class Preprocessor : public RefCountedBase<Preprocessor> { IntrusiveRefCntPtr<PreprocessorOptions> PPOpts; DiagnosticsEngine *Diags; @@ -98,12 +100,12 @@ class Preprocessor : public RefCountedBase<Preprocessor> { ExternalPreprocessorSource *ExternalSource; - /// PTH - An optional PTHManager object used for getting tokens from - /// a token cache rather than lexing the original source file. - OwningPtr<PTHManager> PTH; + /// An optional PTHManager object used for getting tokens from + /// a token cache rather than lexing the original source file. + std::unique_ptr<PTHManager> PTH; - /// BP - A BumpPtrAllocator object used to quickly allocate and release - /// objects internal to the Preprocessor. + /// A BumpPtrAllocator object used to quickly allocate and release + /// objects internal to the Preprocessor. llvm::BumpPtrAllocator BP; /// Identifiers for builtin macros and other builtins. 
@@ -114,6 +116,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> { IdentifierInfo *Ident__TIMESTAMP__; // __TIMESTAMP__ IdentifierInfo *Ident__COUNTER__; // __COUNTER__ IdentifierInfo *Ident_Pragma, *Ident__pragma; // _Pragma, __pragma + IdentifierInfo *Ident__identifier; // __identifier IdentifierInfo *Ident__VA_ARGS__; // __VA_ARGS__ IdentifierInfo *Ident__has_feature; // __has_feature IdentifierInfo *Ident__has_extension; // __has_extension @@ -122,6 +125,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> { IdentifierInfo *Ident__has_include; // __has_include IdentifierInfo *Ident__has_include_next; // __has_include_next IdentifierInfo *Ident__has_warning; // __has_warning + IdentifierInfo *Ident__is_identifier; // __is_identifier IdentifierInfo *Ident__building_module; // __building_module IdentifierInfo *Ident__MODULE__; // __MODULE__ @@ -129,7 +133,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> { unsigned CounterValue; // Next __COUNTER__ value. enum { - /// MaxIncludeStackDepth - Maximum depth of \#includes. + /// \brief Maximum depth of \#includes. MaxAllowedIncludeStackDepth = 200 }; @@ -144,12 +148,11 @@ class Preprocessor : public RefCountedBase<Preprocessor> { /// Whether the preprocessor owns the header search object. bool OwnsHeaderSearch : 1; - /// DisableMacroExpansion - True if macro expansion is disabled. + /// True if macro expansion is disabled. bool DisableMacroExpansion : 1; - /// MacroExpansionInDirectivesOverride - Temporarily disables - /// DisableMacroExpansion (i.e. enables expansion) when parsing preprocessor - /// directives. + /// Temporarily disables DisableMacroExpansion (i.e. enables expansion) + /// when parsing preprocessor directives. bool MacroExpansionInDirectivesOverride : 1; class ResetMacroExpansionHelper; @@ -169,23 +172,25 @@ class Preprocessor : public RefCountedBase<Preprocessor> { /// \brief True if we are pre-expanding macro arguments. bool InMacroArgPreExpansion; - /// Identifiers - This is mapping/lookup information for all identifiers in + /// \brief Mapping/lookup information for all identifiers in /// the program, including program keywords. mutable IdentifierTable Identifiers; - /// Selectors - This table contains all the selectors in the program. Unlike - /// IdentifierTable above, this table *isn't* populated by the preprocessor. - /// It is declared/expanded here because it's role/lifetime is - /// conceptually similar the IdentifierTable. In addition, the current control - /// flow (in clang::ParseAST()), make it convenient to put here. + /// \brief This table contains all the selectors in the program. + /// + /// Unlike IdentifierTable above, this table *isn't* populated by the + /// preprocessor. It is declared/expanded here because its role/lifetime is + /// conceptually similar to the IdentifierTable. In addition, the current + /// control flow (in clang::ParseAST()), make it convenient to put here. + /// /// FIXME: Make sure the lifetime of Identifiers/Selectors *isn't* tied to /// the lifetime of the preprocessor. SelectorTable Selectors; - /// BuiltinInfo - Information about builtins. + /// \brief Information about builtins. Builtin::Context BuiltinInfo; - /// PragmaHandlers - This tracks all of the pragmas that the client registered + /// \brief Tracks all of the pragmas that the client registered /// with this preprocessor. PragmaNamespace *PragmaHandlers; @@ -197,6 +202,9 @@ class Preprocessor : public RefCountedBase<Preprocessor> { /// avoid tearing the Lexer and etc. down). 
bool IncrementalProcessing; + /// The kind of translation unit we are processing. + TranslationUnitKind TUKind; + /// \brief The code-completion handler. CodeCompletionHandler *CodeComplete; @@ -216,7 +224,7 @@ class Preprocessor : public RefCountedBase<Preprocessor> { /// for preprocessing. SourceLocation CodeCompletionFileLoc; - /// \brief The source location of the 'import' contextual keyword we just + /// \brief The source location of the \c import contextual keyword we just /// lexed, if any. SourceLocation ModuleImportLoc; @@ -226,46 +234,53 @@ class Preprocessor : public RefCountedBase<Preprocessor> { /// \brief Whether the last token we lexed was an '@'. bool LastTokenWasAt; - /// \brief Whether the module import expectes an identifier next. Otherwise, + /// \brief Whether the module import expects an identifier next. Otherwise, /// it expects a '.' or ';'. bool ModuleImportExpectsIdentifier; /// \brief The source location of the currently-active - /// #pragma clang arc_cf_code_audited begin. + /// \#pragma clang arc_cf_code_audited begin. SourceLocation PragmaARCCFCodeAuditedLoc; /// \brief True if we hit the code-completion point. bool CodeCompletionReached; /// \brief The number of bytes that we will initially skip when entering the - /// main file, which is used when loading a precompiled preamble, along - /// with a flag that indicates whether skipping this number of bytes will - /// place the lexer at the start of a line. + /// main file, along with a flag that indicates whether skipping this number + /// of bytes will place the lexer at the start of a line. + /// + /// This is used when loading a precompiled preamble. std::pair<unsigned, bool> SkipMainFilePreamble; - /// CurLexer - This is the current top of the stack that we're lexing from if + /// \brief The current top of the stack that we're lexing from if /// not expanding a macro and we are lexing directly from source code. - /// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null. - OwningPtr<Lexer> CurLexer; + /// + /// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null. + std::unique_ptr<Lexer> CurLexer; - /// CurPTHLexer - This is the current top of stack that we're lexing from if - /// not expanding from a macro and we are lexing from a PTH cache. - /// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null. - OwningPtr<PTHLexer> CurPTHLexer; + /// \brief The current top of stack that we're lexing from if + /// not expanding from a macro and we are lexing from a PTH cache. + /// + /// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null. + std::unique_ptr<PTHLexer> CurPTHLexer; - /// CurPPLexer - This is the current top of the stack what we're lexing from - /// if not expanding a macro. This is an alias for either CurLexer or - /// CurPTHLexer. + /// \brief The current top of the stack what we're lexing from + /// if not expanding a macro. + /// + /// This is an alias for either CurLexer or CurPTHLexer. PreprocessorLexer *CurPPLexer; - /// CurLookup - The DirectoryLookup structure used to find the current - /// FileEntry, if CurLexer is non-null and if applicable. This allows us to - /// implement \#include_next and find directory-specific properties. + /// \brief Used to find the current FileEntry, if CurLexer is non-null + /// and if applicable. + /// + /// This allows us to implement \#include_next and find directory-specific + /// properties. 
const DirectoryLookup *CurDirLookup; - /// CurTokenLexer - This is the current macro we are expanding, if we are - /// expanding a macro. One of CurLexer and CurTokenLexer must be null. - OwningPtr<TokenLexer> CurTokenLexer; + /// \brief The current macro we are expanding, if we are expanding a macro. + /// + /// One of CurLexer and CurTokenLexer must be null. + std::unique_ptr<TokenLexer> CurTokenLexer; /// \brief The kind of lexer we're currently working with. enum CurLexerKind { @@ -276,26 +291,48 @@ class Preprocessor : public RefCountedBase<Preprocessor> { CLK_LexAfterModuleImport } CurLexerKind; - /// IncludeMacroStack - This keeps track of the stack of files currently + /// \brief If the current lexer is for a submodule that is being built, this + /// is that submodule. + Module *CurSubmodule; + + /// \brief Keeps track of the stack of files currently /// \#included, and macros currently being expanded from, not counting /// CurLexer/CurTokenLexer. struct IncludeStackInfo { - enum CurLexerKind CurLexerKind; - Lexer *TheLexer; - PTHLexer *ThePTHLexer; - PreprocessorLexer *ThePPLexer; - TokenLexer *TheTokenLexer; - const DirectoryLookup *TheDirLookup; - - IncludeStackInfo(enum CurLexerKind K, Lexer *L, PTHLexer* P, - PreprocessorLexer* PPL, - TokenLexer* TL, const DirectoryLookup *D) - : CurLexerKind(K), TheLexer(L), ThePTHLexer(P), ThePPLexer(PPL), - TheTokenLexer(TL), TheDirLookup(D) {} + enum CurLexerKind CurLexerKind; + Module *TheSubmodule; + std::unique_ptr<Lexer> TheLexer; + std::unique_ptr<PTHLexer> ThePTHLexer; + PreprocessorLexer *ThePPLexer; + std::unique_ptr<TokenLexer> TheTokenLexer; + const DirectoryLookup *TheDirLookup; + + // The following constructors are completely useless copies of the default + // versions, only needed to pacify MSVC. + IncludeStackInfo(enum CurLexerKind CurLexerKind, Module *TheSubmodule, + std::unique_ptr<Lexer> &&TheLexer, + std::unique_ptr<PTHLexer> &&ThePTHLexer, + PreprocessorLexer *ThePPLexer, + std::unique_ptr<TokenLexer> &&TheTokenLexer, + const DirectoryLookup *TheDirLookup) + : CurLexerKind(std::move(CurLexerKind)), + TheSubmodule(std::move(TheSubmodule)), TheLexer(std::move(TheLexer)), + ThePTHLexer(std::move(ThePTHLexer)), + ThePPLexer(std::move(ThePPLexer)), + TheTokenLexer(std::move(TheTokenLexer)), + TheDirLookup(std::move(TheDirLookup)) {} + IncludeStackInfo(IncludeStackInfo &&RHS) + : CurLexerKind(std::move(RHS.CurLexerKind)), + TheSubmodule(std::move(RHS.TheSubmodule)), + TheLexer(std::move(RHS.TheLexer)), + ThePTHLexer(std::move(RHS.ThePTHLexer)), + ThePPLexer(std::move(RHS.ThePPLexer)), + TheTokenLexer(std::move(RHS.TheTokenLexer)), + TheDirLookup(std::move(RHS.TheDirLookup)) {} }; std::vector<IncludeStackInfo> IncludeMacroStack; - /// Callbacks - These are actions invoked when some preprocessor activity is + /// \brief Actions invoked when some preprocessor activity is /// encountered (e.g. a file is \#included, etc). PPCallbacks *Callbacks; @@ -308,14 +345,16 @@ class Preprocessor : public RefCountedBase<Preprocessor> { }; SmallVector<MacroExpandsInfo, 2> DelayedMacroExpandsCallbacks; - /// Macros - For each IdentifierInfo that was associated with a macro, we + /// For each IdentifierInfo that was associated with a macro, we /// keep a mapping to the history of all macro definitions and #undefs in /// the reverse order (the latest one is in the head of the list). 
llvm::DenseMap<const IdentifierInfo*, MacroDirective*> Macros; friend class ASTReader; /// \brief Macros that we want to warn because they are not used at the end - /// of the translation unit; we store just their SourceLocations instead + /// of the translation unit. + /// + /// We store just their SourceLocations instead of /// something like MacroInfo*. The benefit of this is that when we are /// deserializing from PCH, we don't need to deserialize identifier & macros /// just so that we can report that they are unused, we just warn using @@ -324,35 +363,36 @@ class Preprocessor : public RefCountedBase<Preprocessor> { typedef llvm::SmallPtrSet<SourceLocation, 32> WarnUnusedMacroLocsTy; WarnUnusedMacroLocsTy WarnUnusedMacroLocs; - /// MacroArgCache - This is a "freelist" of MacroArg objects that can be + /// \brief A "freelist" of MacroArg objects that can be /// reused for quick allocation. MacroArgs *MacroArgCache; friend class MacroArgs; - /// PragmaPushMacroInfo - For each IdentifierInfo used in a #pragma - /// push_macro directive, we keep a MacroInfo stack used to restore - /// previous macro value. + /// For each IdentifierInfo used in a \#pragma push_macro directive, + /// we keep a MacroInfo stack used to restore the previous macro value. llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> > PragmaPushMacroInfo; // Various statistics we track for performance analysis. - unsigned NumDirectives, NumIncluded, NumDefined, NumUndefined, NumPragma; + unsigned NumDirectives, NumDefined, NumUndefined, NumPragma; unsigned NumIf, NumElse, NumEndif; unsigned NumEnteredSourceFiles, MaxIncludeStackDepth; unsigned NumMacroExpanded, NumFnMacroExpanded, NumBuiltinMacroExpanded; unsigned NumFastMacroExpanded, NumTokenPaste, NumFastTokenPaste; unsigned NumSkipped; - /// Predefines - This string is the predefined macros that preprocessor - /// should use from the command line etc. + /// \brief The predefined macros that preprocessor should use from the + /// command line etc. std::string Predefines; /// \brief The file ID for the preprocessor predefines. FileID PredefinesFileID; - /// TokenLexerCache - Cache macro expanders to reduce malloc traffic. + /// \{ + /// \brief Cache of macro expanders to reduce malloc traffic. enum { TokenLexerCacheSize = 8 }; unsigned NumCachedTokenLexers; TokenLexer *TokenLexerCache[TokenLexerCacheSize]; + /// \} /// \brief Keeps macro expanded tokens for TokenLexers. // @@ -372,17 +412,20 @@ class Preprocessor : public RefCountedBase<Preprocessor> { private: // Cached tokens state. typedef SmallVector<Token, 1> CachedTokensTy; - /// CachedTokens - Cached tokens are stored here when we do backtracking or + /// \brief Cached tokens are stored here when we do backtracking or /// lookahead. They are "lexed" by the CachingLex() method. CachedTokensTy CachedTokens; - /// CachedLexPos - The position of the cached token that CachingLex() should - /// "lex" next. If it points beyond the CachedTokens vector, it means that - /// a normal Lex() should be invoked. + /// \brief The position of the cached token that CachingLex() should + /// "lex" next. + /// + /// If it points beyond the CachedTokens vector, it means that a normal + /// Lex() should be invoked. CachedTokensTy::size_type CachedLexPos; - /// BacktrackPositions - Stack of backtrack positions, allowing nested - /// backtracks. The EnableBacktrackAtThisPos() method pushes a position to + /// \brief Stack of backtrack positions, allowing nested backtracks. 
+ /// + /// The EnableBacktrackAtThisPos() method pushes a position to /// indicate where CachedLexPos should be set when the BackTrack() method is /// invoked (at which point the last position is popped). std::vector<CachedTokensTy::size_type> BacktrackPositions; @@ -397,7 +440,7 @@ private: // Cached tokens state. /// of that list. MacroInfoChain *MIChainHead; - /// MICache - A "freelist" of MacroInfo objects that can be reused for quick + /// A "freelist" of MacroInfo objects that can be reused for quick /// allocation. MacroInfoChain *MICache; @@ -412,20 +455,18 @@ private: // Cached tokens state. public: Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts, DiagnosticsEngine &diags, LangOptions &opts, - const TargetInfo *target, SourceManager &SM, HeaderSearch &Headers, ModuleLoader &TheModuleLoader, - IdentifierInfoLookup *IILookup = 0, + IdentifierInfoLookup *IILookup = nullptr, bool OwnsHeaderSearch = false, - bool DelayInitialization = false, - bool IncrProcessing = false); + TranslationUnitKind TUKind = TU_Complete); ~Preprocessor(); - /// \brief Initialize the preprocessor, if the constructor did not already - /// perform the initialization. + /// \brief Initialize the preprocessor using information about the target. /// - /// \param Target Information about the target. + /// \param Target is owned by the caller and must remain valid for the + /// lifetime of the preprocessor. void Initialize(const TargetInfo &Target); /// \brief Retrieve the preprocessor options used to initialize this @@ -470,8 +511,7 @@ public: return ParsingIfOrElifDirective; } - /// SetCommentRetentionState - Control whether or not the preprocessor retains - /// comments in output. + /// \brief Control whether the preprocessor retains comments in output. void SetCommentRetentionState(bool KeepComments, bool KeepMacroComments) { this->KeepComments = KeepComments | KeepMacroComments; this->KeepMacroComments = KeepMacroComments; @@ -500,26 +540,29 @@ public: /// false if it is producing tokens to be consumed by Parse and Sema. bool isPreprocessedOutput() const { return PreprocessedOutput; } - /// isCurrentLexer - Return true if we are lexing directly from the specified - /// lexer. + /// \brief Return true if we are lexing directly from the specified lexer. bool isCurrentLexer(const PreprocessorLexer *L) const { return CurPPLexer == L; } - /// getCurrentLexer - Return the current lexer being lexed from. Note - /// that this ignores any potentially active macro expansions and _Pragma + /// \brief Return the current lexer being lexed from. + /// + /// Note that this ignores any potentially active macro expansions and _Pragma /// expansions going on at the time. PreprocessorLexer *getCurrentLexer() const { return CurPPLexer; } - /// getCurrentFileLexer - Return the current file lexer being lexed from. + /// \brief Return the current file lexer being lexed from. + /// /// Note that this ignores any potentially active macro expansions and _Pragma /// expansions going on at the time. PreprocessorLexer *getCurrentFileLexer() const; - /// \brief Returns the file ID for the preprocessor predefines. + /// \brief Returns the FileID for the preprocessor predefines. FileID getPredefinesFileID() const { return PredefinesFileID; } - /// getPPCallbacks/addPPCallbacks - Accessors for preprocessor callbacks. + /// \{ + /// \brief Accessors for preprocessor callbacks. + /// /// Note that this class takes ownership of any PPCallbacks object given to /// it. 
PPCallbacks *getPPCallbacks() const { return Callbacks; } @@ -528,12 +571,13 @@ public: C = new PPChainedCallbacks(C, Callbacks); Callbacks = C; } + /// \} /// \brief Given an identifier, return its latest MacroDirective if it is - // \#defined or null if it isn't \#define'd. + /// \#defined or null if it isn't \#define'd. MacroDirective *getMacroDirective(IdentifierInfo *II) const { if (!II->hasMacroDefinition()) - return 0; + return nullptr; MacroDirective *MD = getMacroDirectiveHistory(II); assert(MD->isDefined() && "Macro is undefined!"); @@ -547,13 +591,14 @@ public: MacroInfo *getMacroInfo(IdentifierInfo *II) { if (MacroDirective *MD = getMacroDirective(II)) return MD->getMacroInfo(); - return 0; + return nullptr; } /// \brief Given an identifier, return the (probably #undef'd) MacroInfo - /// representing the most recent macro definition. One can iterate over all - /// previous macro definitions from it. This method should only be called for - /// identifiers that hadMacroDefinition(). + /// representing the most recent macro definition. + /// + /// One can iterate over all previous macro definitions from the most recent + /// one. This should only be called for identifiers that hadMacroDefinition(). MacroDirective *getMacroDirectiveHistory(const IdentifierInfo *II) const; /// \brief Add a directive to the macro directive history for this identifier. @@ -571,14 +616,15 @@ public: /// \brief Set a MacroDirective that was loaded from a PCH file. void setLoadedMacroDirective(IdentifierInfo *II, MacroDirective *MD); - /// macro_iterator/macro_begin/macro_end - This allows you to walk the macro - /// history table. Currently defined macros have + /// \{ + /// Iterators for the macro history table. Currently defined macros have /// IdentifierInfo::hasMacroDefinition() set and an empty /// MacroInfo::getUndefLoc() at the head of the list. typedef llvm::DenseMap<const IdentifierInfo *, MacroDirective*>::const_iterator macro_iterator; macro_iterator macro_begin(bool IncludeExternalMacros = true) const; macro_iterator macro_end(bool IncludeExternalMacros = true) const; + /// \} /// \brief Return the name of the macro defined before \p Loc that has /// spelling \p Tokens. If there are multiple macros with same spelling, @@ -587,8 +633,9 @@ public: ArrayRef<TokenValue> Tokens) const; const std::string &getPredefines() const { return Predefines; } - /// setPredefines - Set the predefines for this Preprocessor. These - /// predefines are automatically injected when parsing the main file. + /// \brief Set the predefines for this Preprocessor. + /// + /// These predefines are automatically injected when parsing the main file. void setPredefines(const char *P) { Predefines = P; } void setPredefines(const std::string &P) { Predefines = P; } @@ -598,23 +645,28 @@ public: return &Identifiers.get(Name); } - /// AddPragmaHandler - Add the specified pragma handler to the preprocessor. - /// If 'Namespace' is non-null, then it is a token required to exist on the + /// \brief Add the specified pragma handler to this preprocessor. + /// + /// If \p Namespace is non-null, then it is a token required to exist on the /// pragma line before the pragma string starts, e.g. "STDC" or "GCC". void AddPragmaHandler(StringRef Namespace, PragmaHandler *Handler); void AddPragmaHandler(PragmaHandler *Handler) { AddPragmaHandler(StringRef(), Handler); } - /// RemovePragmaHandler - Remove the specific pragma handler from - /// the preprocessor. 
If \p Namespace is non-null, then it should - /// be the namespace that \p Handler was added to. It is an error - /// to remove a handler that has not been registered. + /// \brief Remove the specific pragma handler from this preprocessor. + /// + /// If \p Namespace is non-null, then it should be the namespace that + /// \p Handler was added to. It is an error to remove a handler that + /// has not been registered. void RemovePragmaHandler(StringRef Namespace, PragmaHandler *Handler); void RemovePragmaHandler(PragmaHandler *Handler) { RemovePragmaHandler(StringRef(), Handler); } + /// Install empty handlers for all pragmas (making them ignored). + void IgnorePragmas(); + /// \brief Add the specified comment handler to the preprocessor. void addCommentHandler(CommentHandler *Handler); @@ -635,7 +687,7 @@ public: /// \brief Clear out the code completion handler. void clearCodeCompletionHandler() { - CodeComplete = 0; + CodeComplete = nullptr; } /// \brief Hook used by the lexer to invoke the "natural language" code @@ -650,51 +702,51 @@ public: /// all macro expansions, macro definitions, etc. void createPreprocessingRecord(); - /// EnterMainSourceFile - Enter the specified FileID as the main source file, + /// \brief Enter the specified FileID as the main source file, /// which implicitly adds the builtin defines etc. void EnterMainSourceFile(); - /// EndSourceFile - Inform the preprocessor callbacks that processing is - /// complete. + /// \brief Inform the preprocessor callbacks that processing is complete. void EndSourceFile(); - /// EnterSourceFile - Add a source file to the top of the include stack and - /// start lexing tokens from it instead of the current buffer. Emit an error - /// and don't enter the file on error. - void EnterSourceFile(FileID CurFileID, const DirectoryLookup *Dir, + /// \brief Add a source file to the top of the include stack and + /// start lexing tokens from it instead of the current buffer. + /// + /// Emits a diagnostic, doesn't enter the file, and returns true on error. + bool EnterSourceFile(FileID CurFileID, const DirectoryLookup *Dir, SourceLocation Loc); - /// EnterMacro - Add a Macro to the top of the include stack and start lexing - /// tokens from it instead of the current buffer. Args specifies the - /// tokens input to a function-like macro. + /// \brief Add a Macro to the top of the include stack and start lexing + /// tokens from it instead of the current buffer. /// - /// ILEnd specifies the location of the ')' for a function-like macro or the - /// identifier for an object-like macro. + /// \param Args specifies the tokens input to a function-like macro. + /// \param ILEnd specifies the location of the ')' for a function-like macro + /// or the identifier for an object-like macro. void EnterMacro(Token &Identifier, SourceLocation ILEnd, MacroInfo *Macro, MacroArgs *Args); - /// EnterTokenStream - Add a "macro" context to the top of the include stack, + /// \brief Add a "macro" context to the top of the include stack, /// which will cause the lexer to start returning the specified tokens. /// - /// If DisableMacroExpansion is true, tokens lexed from the token stream will - /// not be subject to further macro expansion. Otherwise, these tokens will - /// be re-macro-expanded when/if expansion is enabled. - /// - /// If OwnsTokens is false, this method assumes that the specified stream of - /// tokens has a permanent owner somewhere, so they do not need to be copied. 
- /// If it is true, it assumes the array of tokens is allocated with new[] and - /// must be freed. + /// If \p DisableMacroExpansion is true, tokens lexed from the token stream + /// will not be subject to further macro expansion. Otherwise, these tokens + /// will be re-macro-expanded when/if expansion is enabled. /// + /// If \p OwnsTokens is false, this method assumes that the specified stream + /// of tokens has a permanent owner somewhere, so they do not need to be + /// copied. If it is true, it assumes the array of tokens is allocated with + /// \c new[] and must be freed. void EnterTokenStream(const Token *Toks, unsigned NumToks, bool DisableMacroExpansion, bool OwnsTokens); - /// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the - /// lexer stack. This should only be used in situations where the current - /// state of the top-of-stack lexer is known. + /// \brief Pop the current lexer/macro exp off the top of the lexer stack. + /// + /// This should only be used in situations where the current state of the + /// top-of-stack lexer is known. void RemoveTopOfLexerStack(); - /// EnableBacktrackAtThisPos - From the point that this method is called, and - /// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor + /// From the point that this method is called, and until + /// CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor /// keeps track of the lexed tokens so that a subsequent Backtrack() call will /// make the Preprocessor re-lex the same tokens. /// @@ -708,18 +760,18 @@ public: /// void EnableBacktrackAtThisPos(); - /// CommitBacktrackedTokens - Disable the last EnableBacktrackAtThisPos call. + /// \brief Disable the last EnableBacktrackAtThisPos call. void CommitBacktrackedTokens(); - /// Backtrack - Make Preprocessor re-lex the tokens that were lexed since + /// \brief Make Preprocessor re-lex the tokens that were lexed since /// EnableBacktrackAtThisPos() was previously called. void Backtrack(); - /// isBacktrackEnabled - True if EnableBacktrackAtThisPos() was called and + /// \brief True if EnableBacktrackAtThisPos() was called and /// caching of tokens is on. bool isBacktrackEnabled() const { return !BacktrackPositions.empty(); } - /// Lex - Lex the next token for this preprocessor. + /// \brief Lex the next token for this preprocessor. void Lex(Token &Result); void LexAfterModuleImport(Token &Result); @@ -743,17 +795,18 @@ public: const char *DiagnosticTag, bool AllowMacroExpansion); - /// LexNonComment - Lex a token. If it's a comment, keep lexing until we get - /// something not a comment. This is useful in -E -C mode where comments - /// would foul up preprocessor directive handling. + /// \brief Lex a token. If it's a comment, keep lexing until we get + /// something not a comment. + /// + /// This is useful in -E -C mode where comments would foul up preprocessor + /// directive handling. void LexNonComment(Token &Result) { do Lex(Result); while (Result.getKind() == tok::comment); } - /// LexUnexpandedToken - This is just like Lex, but this disables macro - /// expansion of identifier tokens. + /// \brief Just like Lex, but disables macro expansion of identifier tokens. void LexUnexpandedToken(Token &Result) { // Disable macro expansion. bool OldVal = DisableMacroExpansion; @@ -765,24 +818,31 @@ public: DisableMacroExpansion = OldVal; } - /// LexUnexpandedNonComment - Like LexNonComment, but this disables macro - /// expansion of identifier tokens. 
+ /// \brief Like LexNonComment, but this disables macro expansion of + /// identifier tokens. void LexUnexpandedNonComment(Token &Result) { do LexUnexpandedToken(Result); while (Result.getKind() == tok::comment); } + /// \brief Parses a simple integer literal to get its numeric value. Floating + /// point literals and user defined literals are rejected. Used primarily to + /// handle pragmas that accept integer arguments. + bool parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value); + /// Disables macro expansion everywhere except for preprocessor directives. void SetMacroExpansionOnlyInDirectives() { DisableMacroExpansion = true; MacroExpansionInDirectivesOverride = true; } - /// LookAhead - This peeks ahead N tokens and returns that token without - /// consuming any tokens. LookAhead(0) returns the next token that would be - /// returned by Lex(), LookAhead(1) returns the token after it, etc. This - /// returns normal tokens after phase 5. As such, it is equivalent to using + /// \brief Peeks ahead N tokens and returns that token without consuming any + /// tokens. + /// + /// LookAhead(0) returns the next token that would be returned by Lex(), + /// LookAhead(1) returns the token after it, etc. This returns normal + /// tokens after phase 5. As such, it is equivalent to using /// 'Lex', not 'LexUnexpandedToken'. const Token &LookAhead(unsigned N) { if (CachedLexPos + N < CachedTokens.size()) @@ -791,8 +851,9 @@ public: return PeekAhead(N+1); } - /// RevertCachedTokens - When backtracking is enabled and tokens are cached, + /// \brief When backtracking is enabled and tokens are cached, /// this allows to revert a specific number of tokens. + /// /// Note that the number of tokens being reverted should be up to the last /// backtrack position, not more. void RevertCachedTokens(unsigned N) { @@ -805,20 +866,21 @@ public: CachedLexPos -= N; } - /// EnterToken - Enters a token in the token stream to be lexed next. If - /// BackTrack() is called afterwards, the token will remain at the insertion - /// point. + /// \brief Enters a token in the token stream to be lexed next. + /// + /// If BackTrack() is called afterwards, the token will remain at the + /// insertion point. void EnterToken(const Token &Tok) { EnterCachingLexMode(); CachedTokens.insert(CachedTokens.begin()+CachedLexPos, Tok); } - /// AnnotateCachedTokens - We notify the Preprocessor that if it is caching - /// tokens (because backtrack is enabled) it should replace the most recent - /// cached tokens with the given annotation token. This function has no effect - /// if backtracking is not enabled. + /// We notify the Preprocessor that if it is caching tokens (because + /// backtrack is enabled) it should replace the most recent cached tokens + /// with the given annotation token. This function has no effect if + /// backtracking is not enabled. /// - /// Note that the use of this function is just for optimization; so that the + /// Note that the use of this function is just for optimization, so that the /// cached tokens doesn't get re-parsed and re-resolved after a backtrack is /// invoked. void AnnotateCachedTokens(const Token &Tok) { @@ -848,7 +910,7 @@ public: CachedTokens[CachedLexPos-1] = Tok; } - /// TypoCorrectToken - Update the current token to represent the provided + /// Update the current token to represent the provided /// identifier, in order to cache an action performed by typo correction. 
void TypoCorrectToken(const Token &Tok) { assert(Tok.getIdentifierInfo() && "Expected identifier token"); @@ -886,14 +948,16 @@ public: unsigned Line, unsigned Column); /// \brief Determine if we are performing code completion. - bool isCodeCompletionEnabled() const { return CodeCompletionFile != 0; } + bool isCodeCompletionEnabled() const { return CodeCompletionFile != nullptr; } /// \brief Returns the location of the code-completion point. + /// /// Returns an invalid location if code-completion is not enabled or the file /// containing the code-completion point has not been lexed yet. SourceLocation getCodeCompletionLoc() const { return CodeCompletionLoc; } /// \brief Returns the start location of the file of code-completion point. + /// /// Returns an invalid location if code-completion is not enabled or the file /// containing the code-completion point has not been lexed yet. SourceLocation getCodeCompletionFileLoc() const { @@ -913,8 +977,9 @@ public: } /// \brief The location of the currently-active \#pragma clang - /// arc_cf_code_audited begin. Returns an invalid location if there - /// is no such pragma active. + /// arc_cf_code_audited begin. + /// + /// Returns an invalid location if there is no such pragma active. SourceLocation getPragmaARCCFCodeAuditedLoc() const { return PragmaARCCFCodeAuditedLoc; } @@ -936,7 +1001,7 @@ public: SkipMainFilePreamble.second = StartOfLine; } - /// Diag - Forwarding function for diagnostics. This emits a diagnostic at + /// Forwarding function for diagnostics. This emits a diagnostic at /// the specified Token's location, translating the token's start /// position in the current buffer into a SourcePosition object for rendering. DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) const { @@ -947,7 +1012,7 @@ public: return Diags->Report(Tok.getLocation(), DiagID); } - /// getSpelling() - Return the 'spelling' of the token at the given + /// Return the 'spelling' of the token at the given /// location; does not go up to the spelling location or down to the /// expansion location. /// @@ -956,25 +1021,28 @@ public: /// \param invalid If non-null, will be set \c true if an error occurs. StringRef getSpelling(SourceLocation loc, SmallVectorImpl<char> &buffer, - bool *invalid = 0) const { + bool *invalid = nullptr) const { return Lexer::getSpelling(loc, buffer, SourceMgr, LangOpts, invalid); } - /// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a - /// token is the characters used to represent the token in the source file - /// after trigraph expansion and escaped-newline folding. In particular, this - /// wants to get the true, uncanonicalized, spelling of things like digraphs - /// UCNs, etc. + /// \brief Return the 'spelling' of the Tok token. + /// + /// The spelling of a token is the characters used to represent the token in + /// the source file after trigraph expansion and escaped-newline folding. In + /// particular, this wants to get the true, uncanonicalized, spelling of + /// things like digraphs, UCNs, etc. /// /// \param Invalid If non-null, will be set \c true if an error occurs. - std::string getSpelling(const Token &Tok, bool *Invalid = 0) const { + std::string getSpelling(const Token &Tok, bool *Invalid = nullptr) const { return Lexer::getSpelling(Tok, SourceMgr, LangOpts, Invalid); } - /// getSpelling - This method is used to get the spelling of a token into a - /// preallocated buffer, instead of as an std::string. 
The caller is required - /// to allocate enough space for the token, which is guaranteed to be at least - /// Tok.getLength() bytes long. The length of the actual result is returned. + /// \brief Get the spelling of a token into a preallocated buffer, instead + /// of as an std::string. + /// + /// The caller is required to allocate enough space for the token, which is + /// guaranteed to be at least Tok.getLength() bytes long. The length of the + /// actual result is returned. /// /// Note that this method may do two possible things: it may either fill in /// the buffer specified with characters, or it may *change the input pointer* @@ -982,16 +1050,17 @@ public: /// copy). The caller is not allowed to modify the returned buffer pointer /// if an internal buffer is returned. unsigned getSpelling(const Token &Tok, const char *&Buffer, - bool *Invalid = 0) const { + bool *Invalid = nullptr) const { return Lexer::getSpelling(Tok, Buffer, SourceMgr, LangOpts, Invalid); } - /// getSpelling - This method is used to get the spelling of a token into a - /// SmallVector. Note that the returned StringRef may not point to the + /// \brief Get the spelling of a token into a SmallVector. + /// + /// Note that the returned StringRef may not point to the /// supplied buffer if a copy can be avoided. StringRef getSpelling(const Token &Tok, SmallVectorImpl<char> &Buffer, - bool *Invalid = 0) const; + bool *Invalid = nullptr) const; /// \brief Relex the token at the specified location. /// \returns true if there was a failure, false on success. @@ -1000,10 +1069,11 @@ public: return Lexer::getRawToken(Loc, Result, SourceMgr, LangOpts, IgnoreWhiteSpace); } - /// getSpellingOfSingleCharacterNumericConstant - Tok is a numeric constant - /// with length 1, return the character. - char getSpellingOfSingleCharacterNumericConstant(const Token &Tok, - bool *Invalid = 0) const { + /// \brief Given a Token \p Tok that is a numeric constant with length 1, + /// return the character. + char + getSpellingOfSingleCharacterNumericConstant(const Token &Tok, + bool *Invalid = nullptr) const { assert(Tok.is(tok::numeric_constant) && Tok.getLength() == 1 && "Called on unsupported token"); assert(!Tok.needsCleaning() && "Token can't need cleaning with length 1"); @@ -1019,18 +1089,21 @@ public: /// \brief Retrieve the name of the immediate macro expansion. /// - /// This routine starts from a source location, and finds the name of the macro - /// responsible for its immediate expansion. It looks through any intervening - /// macro argument expansions to compute this. It returns a StringRef which - /// refers to the SourceManager-owned buffer of the source where that macro - /// name is spelled. Thus, the result shouldn't out-live the SourceManager. + /// This routine starts from a source location, and finds the name of the + /// macro responsible for its immediate expansion. It looks through any + /// intervening macro argument expansions to compute this. It returns a + /// StringRef that refers to the SourceManager-owned buffer of the source + /// where that macro name is spelled. Thus, the result shouldn't out-live + /// the SourceManager. StringRef getImmediateMacroName(SourceLocation Loc) { return Lexer::getImmediateMacroName(Loc, SourceMgr, getLangOpts()); } - /// CreateString - Plop the specified string into a scratch buffer and set the - /// specified token's location and length to it. If specified, the source - /// location provides a location of the expansion point of the token. 
+ /// \brief Plop the specified string into a scratch buffer and set the + /// specified token's location and length to it. + /// + /// If specified, the source location provides a location of the expansion + /// point of the token. void CreateString(StringRef Str, Token &Tok, SourceLocation ExpansionLocStart = SourceLocation(), SourceLocation ExpansionLocEnd = SourceLocation()); @@ -1060,7 +1133,7 @@ public: /// \param MacroBegin If non-null and function returns true, it is set to /// begin location of the macro. bool isAtStartOfMacroExpansion(SourceLocation loc, - SourceLocation *MacroBegin = 0) const { + SourceLocation *MacroBegin = nullptr) const { return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts, MacroBegin); } @@ -1071,27 +1144,26 @@ public: /// \param MacroEnd If non-null and function returns true, it is set to /// end location of the macro. bool isAtEndOfMacroExpansion(SourceLocation loc, - SourceLocation *MacroEnd = 0) const { + SourceLocation *MacroEnd = nullptr) const { return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd); } - /// DumpToken - Print the token to stderr, used for debugging. - /// + /// \brief Print the token to stderr, used for debugging. void DumpToken(const Token &Tok, bool DumpFlags = false) const; void DumpLocation(SourceLocation Loc) const; void DumpMacro(const MacroInfo &MI) const; - /// AdvanceToTokenCharacter - Given a location that specifies the start of a + /// \brief Given a location that specifies the start of a /// token, return a new location that specifies a character within the token. SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart, unsigned Char) const { return Lexer::AdvanceToTokenCharacter(TokStart, Char, SourceMgr, LangOpts); } - /// IncrementPasteCounter - Increment the counters for the number of token - /// paste operations performed. If fast was specified, this is a 'fast paste' - /// case we handled. + /// \brief Increment the counters for the number of token paste operations + /// performed. /// + /// If fast was specified, this is a 'fast paste' case we handled. void IncrementPasteCounter(bool isFast) { if (isFast) ++NumFastTokenPaste; @@ -1103,16 +1175,16 @@ public: size_t getTotalMemory() const; - /// HandleMicrosoftCommentPaste - When the macro expander pastes together a - /// comment (/##/) in microsoft mode, this method handles updating the current - /// state, returning the token on the next source line. + /// When the macro expander pastes together a comment (/##/) in Microsoft + /// mode, this method handles updating the current state, returning the + /// token on the next source line. void HandleMicrosoftCommentPaste(Token &Tok); //===--------------------------------------------------------------------===// // Preprocessor callback methods. These are invoked by a lexer as various // directives and events are found. - /// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the + /// Given a tok::raw_identifier token, look up the /// identifier information for the token and install it into the token, /// updating the token kind accordingly. IdentifierInfo *LookUpIdentifierInfo(Token &Identifier) const; @@ -1122,14 +1194,13 @@ private: public: - // SetPoisonReason - Call this function to indicate the reason for - // poisoning an identifier. If that identifier is accessed while - // poisoned, then this reason will be used instead of the default - // "poisoned" diagnostic. + /// \brief Specifies the reason for poisoning an identifier. 
+ /// + /// If that identifier is accessed while poisoned, then this reason will be + /// used instead of the default "poisoned" diagnostic. void SetPoisonReason(IdentifierInfo *II, unsigned DiagID); - // HandlePoisonedIdentifier - Display reason for poisoned - // identifier. + /// \brief Display reason for poisoned identifier. void HandlePoisonedIdentifier(Token & Tok); void MaybeHandlePoisonedIdentifier(Token & Identifier) { @@ -1155,45 +1226,53 @@ private: IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; + + const char *getCurLexerEndPos(); + public: void PoisonSEHIdentifiers(bool Poison = true); // Borland - /// HandleIdentifier - This callback is invoked when the lexer reads an - /// identifier and has filled in the tokens IdentifierInfo member. This - /// callback potentially macro expands it or turns it into a named token (like - /// 'for'). + /// \brief Callback invoked when the lexer reads an identifier and has + /// filled in the tokens IdentifierInfo member. + /// + /// This callback potentially macro expands it or turns it into a named + /// token (like 'for'). /// /// \returns true if we actually computed a token, false if we need to /// lex again. bool HandleIdentifier(Token &Identifier); - /// HandleEndOfFile - This callback is invoked when the lexer hits the end of - /// the current file. This either returns the EOF token and returns true, or + /// \brief Callback invoked when the lexer hits the end of the current file. + /// + /// This either returns the EOF token and returns true, or /// pops a level off the include stack and returns false, at which point the /// client should call lex again. bool HandleEndOfFile(Token &Result, bool isEndOfMacro = false); - /// HandleEndOfTokenLexer - This callback is invoked when the current - /// TokenLexer hits the end of its token stream. + /// \brief Callback invoked when the current TokenLexer hits the end of its + /// token stream. bool HandleEndOfTokenLexer(Token &Result); - /// HandleDirective - This callback is invoked when the lexer sees a # token - /// at the start of a line. This consumes the directive, modifies the - /// lexer/preprocessor state, and advances the lexer(s) so that the next token - /// read is the correct one. + /// \brief Callback invoked when the lexer sees a # token at the start of a + /// line. + /// + /// This consumes the directive, modifies the lexer/preprocessor state, and + /// advances the lexer(s) so that the next token read is the correct one. void HandleDirective(Token &Result); - /// CheckEndOfDirective - Ensure that the next token is a tok::eod token. If - /// not, emit a diagnostic and consume up until the eod. If EnableMacros is - /// true, then we consider macros that expand to zero tokens as being ok. + /// \brief Ensure that the next token is a tok::eod token. + /// + /// If not, emit a diagnostic and consume up until the eod. + /// If \p EnableMacros is true, then we consider macros that expand to zero + /// tokens as being ok. void CheckEndOfDirective(const char *Directive, bool EnableMacros = false); - /// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the - /// current line until the tok::eod token is found. + /// \brief Read and discard all tokens remaining on the current line until + /// the tok::eod token is found. 
void DiscardUntilEndOfDirective(); - /// SawDateOrTime - This returns true if the preprocessor has seen a use of + /// \brief Returns true if the preprocessor has seen a use of /// __DATE__ or __TIME__ in the file so far. bool SawDateOrTime() const { return DATELoc != SourceLocation() || TIMELoc != SourceLocation(); @@ -1234,17 +1313,20 @@ public: ModuleMap::KnownHeader *SuggestedModule, bool SkipCache = false); - /// GetCurLookup - The DirectoryLookup structure used to find the current - /// FileEntry, if CurLexer is non-null and if applicable. This allows us to - /// implement \#include_next and find directory-specific properties. + /// \brief Get the DirectoryLookup structure used to find the current + /// FileEntry, if CurLexer is non-null and if applicable. + /// + /// This allows us to implement \#include_next and find directory-specific + /// properties. const DirectoryLookup *GetCurDirLookup() { return CurDirLookup; } /// \brief Return true if we're in the top-level file, not in a \#include. bool isInPrimaryFile() const; - /// ConcatenateIncludeName - Handle cases where the \#include name is expanded - /// from a macro as multiple tokens, which need to be glued together. This - /// occurs for code like: + /// \brief Handle cases where the \#include name is expanded + /// from a macro as multiple tokens, which need to be glued together. + /// + /// This occurs for code like: /// \code /// \#define FOO <x/y.h> /// \#include FOO @@ -1257,28 +1339,28 @@ public: bool ConcatenateIncludeName(SmallString<128> &FilenameBuffer, SourceLocation &End); - /// LexOnOffSwitch - Lex an on-off-switch (C99 6.10.6p2) and verify that it is + /// \brief Lex an on-off-switch (C99 6.10.6p2) and verify that it is /// followed by EOD. Return true if the token is not a valid on-off-switch. bool LexOnOffSwitch(tok::OnOffSwitch &OOS); + bool CheckMacroName(Token &MacroNameTok, char isDefineUndef); + private: void PushIncludeMacroStack() { - IncludeMacroStack.push_back(IncludeStackInfo(CurLexerKind, - CurLexer.take(), - CurPTHLexer.take(), - CurPPLexer, - CurTokenLexer.take(), - CurDirLookup)); - CurPPLexer = 0; + IncludeMacroStack.push_back(IncludeStackInfo( + CurLexerKind, CurSubmodule, std::move(CurLexer), std::move(CurPTHLexer), + CurPPLexer, std::move(CurTokenLexer), CurDirLookup)); + CurPPLexer = nullptr; } void PopIncludeMacroStack() { - CurLexer.reset(IncludeMacroStack.back().TheLexer); - CurPTHLexer.reset(IncludeMacroStack.back().ThePTHLexer); + CurLexer = std::move(IncludeMacroStack.back().TheLexer); + CurPTHLexer = std::move(IncludeMacroStack.back().ThePTHLexer); CurPPLexer = IncludeMacroStack.back().ThePPLexer; - CurTokenLexer.reset(IncludeMacroStack.back().TheTokenLexer); + CurTokenLexer = std::move(IncludeMacroStack.back().TheTokenLexer); CurDirLookup = IncludeMacroStack.back().TheDirLookup; + CurSubmodule = IncludeMacroStack.back().TheSubmodule; CurLexerKind = IncludeMacroStack.back().CurLexerKind; IncludeMacroStack.pop_back(); } @@ -1300,15 +1382,16 @@ private: /// This memory will be reused for allocating new MacroInfo objects. void ReleaseMacroInfo(MacroInfo* MI); - /// ReadMacroName - Lex and validate a macro name, which occurs after a - /// \#define or \#undef. This emits a diagnostic, sets the token kind to eod, + /// \brief Lex and validate a macro name, which occurs after a + /// \#define or \#undef. + /// + /// This emits a diagnostic, sets the token kind to eod, /// and discards the rest of the macro line if the macro name is invalid. 
void ReadMacroName(Token &MacroNameTok, char isDefineUndef = 0); - /// ReadMacroDefinitionArgList - The ( starting an argument list of a macro - /// definition has just been read. Lex the rest of the arguments and the - /// closing ), updating MI with what we learn and saving in LastTok the - /// last token read. + /// The ( starting an argument list of a macro definition has just been read. + /// Lex the rest of the arguments and the closing ), updating \p MI with + /// what we learn and saving in \p LastTok the last token read. /// Return true if an error occurs parsing the arg list. bool ReadMacroDefinitionArgList(MacroInfo *MI, Token& LastTok); @@ -1327,21 +1410,22 @@ private: /// \brief A fast PTH version of SkipExcludedConditionalBlock. void PTHSkipExcludedConditionalBlock(); - /// EvaluateDirectiveExpression - Evaluate an integer constant expression that - /// may occur after a #if or #elif directive and return it as a bool. If the - /// expression is equivalent to "!defined(X)" return X in IfNDefMacro. + /// \brief Evaluate an integer constant expression that may occur after a + /// \#if or \#elif directive and return it as a bool. + /// + /// If the expression is equivalent to "!defined(X)" return X in IfNDefMacro. bool EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro); - /// RegisterBuiltinPragmas - Install the standard preprocessor pragmas: + /// \brief Install the standard preprocessor pragmas: /// \#pragma GCC poison/system_header/dependency and \#pragma once. void RegisterBuiltinPragmas(); /// \brief Register builtin macros such as __LINE__ with the identifier table. void RegisterBuiltinMacros(); - /// HandleMacroExpandedIdentifier - If an identifier token is read that is to - /// be expanded as a macro, handle it and return the next token as 'Tok'. If - /// we lexed a token, return true; otherwise the caller should lex again. + /// If an identifier token is read that is to be expanded as a macro, handle + /// it and return the next token as 'Tok'. If we lexed a token, return true; + /// otherwise the caller should lex again. bool HandleMacroExpandedIdentifier(Token &Tok, MacroDirective *MD); /// \brief Cache macro expanded tokens for TokenLexers. @@ -1354,52 +1438,51 @@ private: void removeCachedMacroExpandedTokensOfLastLexer(); friend void TokenLexer::ExpandFunctionArguments(); - /// isNextPPTokenLParen - Determine whether the next preprocessor token to be + /// Determine whether the next preprocessor token to be /// lexed is a '('. If so, consume the token and return true, if not, this /// method should have no observable side-effect on the lexed tokens. bool isNextPPTokenLParen(); - /// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is - /// invoked to read all of the formal arguments specified for the macro - /// invocation. This returns null on error. + /// After reading "MACRO(", this method is invoked to read all of the formal + /// arguments specified for the macro invocation. Returns null on error. MacroArgs *ReadFunctionLikeMacroArgs(Token &MacroName, MacroInfo *MI, SourceLocation &ExpansionEnd); - /// ExpandBuiltinMacro - If an identifier token is read that is to be expanded + /// \brief If an identifier token is read that is to be expanded /// as a builtin macro, handle it and return the next token as 'Tok'. void ExpandBuiltinMacro(Token &Tok); - /// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then - /// return the first token after the directive. The _Pragma token has just - /// been read into 'Tok'. 
+ /// \brief Read a \c _Pragma directive, slice it up, process it, then + /// return the first token after the directive. + /// This assumes that the \c _Pragma token has just been read into \p Tok. void Handle_Pragma(Token &Tok); - /// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text - /// is not enclosed within a string literal. + /// \brief Like Handle_Pragma except the pragma text is not enclosed within + /// a string literal. void HandleMicrosoft__pragma(Token &Tok); - /// EnterSourceFileWithLexer - Add a lexer to the top of the include stack and + /// \brief Add a lexer to the top of the include stack and /// start lexing tokens from it instead of the current buffer. void EnterSourceFileWithLexer(Lexer *TheLexer, const DirectoryLookup *Dir); - /// EnterSourceFileWithPTH - Add a lexer to the top of the include stack and + /// \brief Add a lexer to the top of the include stack and /// start getting tokens from it using the PTH cache. void EnterSourceFileWithPTH(PTHLexer *PL, const DirectoryLookup *Dir); - /// \brief Set the file ID for the preprocessor predefines. + /// \brief Set the FileID for the preprocessor predefines. void setPredefinesFileID(FileID FID) { assert(PredefinesFileID.isInvalid() && "PredefinesFileID already set!"); PredefinesFileID = FID; } - /// IsFileLexer - Returns true if we are lexing from a file and not a - /// pragma or a macro. + /// \brief Returns true if we are lexing from a file and not a + /// pragma or a macro. static bool IsFileLexer(const Lexer* L, const PreprocessorLexer* P) { - return L ? !L->isPragmaLexer() : P != 0; + return L ? !L->isPragmaLexer() : P != nullptr; } static bool IsFileLexer(const IncludeStackInfo& I) { - return IsFileLexer(I.TheLexer, I.ThePPLexer); + return IsFileLexer(I.TheLexer.get(), I.ThePPLexer); } bool IsFileLexer() const { @@ -1437,7 +1520,7 @@ private: // File inclusion. void HandleIncludeDirective(SourceLocation HashLoc, Token &Tok, - const DirectoryLookup *LookupFrom = 0, + const DirectoryLookup *LookupFrom = nullptr, bool isImport = false); void HandleIncludeNextDirective(SourceLocation HashLoc, Token &Tok); void HandleIncludeMacrosDirective(SourceLocation HashLoc, Token &Tok); @@ -1449,25 +1532,6 @@ private: /// points to. Module *getModuleForLocation(SourceLocation FilenameLoc); - /// \brief Verify that a private header is included only from within its - /// module. - bool violatesPrivateInclude(Module *RequestingModule, - const FileEntry *IncFileEnt, - ModuleMap::ModuleHeaderRole Role, - Module *RequestedModule); - - /// \brief Verify that a module includes headers only from modules that it - /// has declared that it uses. - bool violatesUseDeclarations(Module *RequestingModule, - Module *RequestedModule); - - /// \brief Verify that it is legal for the source file that \p FilenameLoc - /// points to to include the file \p Filename. - /// - /// Tries to reuse \p IncFileEnt. - void verifyModuleInclude(SourceLocation FilenameLoc, StringRef Filename, - const FileEntry *IncFileEnt); - // Macro handling. void HandleDefineDirective(Token &Tok, bool ImmediatelyAfterTopLevelIfndef); void HandleUndefDirective(Token &Tok); |
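One interface change in this import is easy to miss: EnterSourceFile now returns bool (true on failure) instead of void, so callers can detect that a file could not be entered. A minimal, hypothetical caller written against the imported 3.5 header; the wrapper function and its arguments are made up for illustration:

#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"

// Hypothetical helper: returns false if the preprocessor refused the file.
// Before this import the call returned void, so callers could not check for
// failure; after it, the bool result reports that a diagnostic was emitted
// and the file was not entered.
static bool enterFileOrBail(clang::Preprocessor &PP, clang::FileID FID,
                            clang::SourceLocation Loc) {
  if (PP.EnterSourceFile(FID, /*Dir=*/nullptr, Loc))
    return false;  // EnterSourceFile already emitted the diagnostic
  return true;
}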