Diffstat (limited to 'lib/Lex')
-rw-r--r--  lib/Lex/Lexer.cpp                292
-rw-r--r--  lib/Lex/LiteralSupport.cpp        44
-rw-r--r--  lib/Lex/MacroInfo.cpp             19
-rw-r--r--  lib/Lex/Makefile                   1
-rw-r--r--  lib/Lex/PPDirectives.cpp          51
-rw-r--r--  lib/Lex/PPExpressions.cpp         19
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp      24
-rw-r--r--  lib/Lex/PTHLexer.cpp              30
-rw-r--r--  lib/Lex/Pragma.cpp               270
-rw-r--r--  lib/Lex/PreprocessingRecord.cpp    3
-rw-r--r--  lib/Lex/Preprocessor.cpp          29
-rw-r--r--  lib/Lex/TokenLexer.cpp            18
12 files changed, 703 insertions(+), 97 deletions(-)
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 91b14f6..917829b 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -27,7 +27,9 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cctype>
@@ -247,6 +249,200 @@ unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
return TheTok.getLength();
}
+SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ bool Invalid = false;
+ llvm::StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return Loc;
+
+ // Back up from the current location until we hit the beginning of a line
+ // (or the buffer). We'll relex from that point.
+ const char *BufStart = Buffer.data();
+ const char *StrData = BufStart+LocInfo.second;
+ if (StrData[0] == '\n' || StrData[0] == '\r')
+ return Loc;
+
+ const char *LexStart = StrData;
+ while (LexStart != BufStart) {
+ if (LexStart[0] == '\n' || LexStart[0] == '\r') {
+ ++LexStart;
+ break;
+ }
+
+ --LexStart;
+ }
+
+ // Create a lexer starting at the beginning of this token.
+ SourceLocation LexerStartLoc = Loc.getFileLocWithOffset(-LocInfo.second);
+ Lexer TheLexer(LexerStartLoc, LangOpts, BufStart, LexStart, Buffer.end());
+ TheLexer.SetCommentRetentionState(true);
+
+ // Lex tokens until we find the token that contains the source location.
+ Token TheTok;
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+
+ if (TheLexer.getBufferLocation() > StrData) {
+ // Lexing this token has taken the lexer past the source location we're
+ // looking for. If the current token encompasses our source location,
+ // return the beginning of that token.
+ if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
+ return TheTok.getLocation();
+
+ // We ended up skipping over the source location entirely, which means
+ // that it points into whitespace. We're done here.
+ break;
+ }
+ } while (TheTok.getKind() != tok::eof);
+
+ // We've passed our source location; just return the original source location.
+ return Loc;
+}
+
+namespace {
+ enum PreambleDirectiveKind {
+ PDK_Skipped,
+ PDK_StartIf,
+ PDK_EndIf,
+ PDK_Unknown
+ };
+}
+
+std::pair<unsigned, bool>
+Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer, unsigned MaxLines) {
+ // Create a lexer starting at the beginning of the file. Note that we use a
+ // "fake" file source location at offset 1 so that the lexer will track our
+ // position within the file.
+ const unsigned StartOffset = 1;
+ SourceLocation StartLoc = SourceLocation::getFromRawEncoding(StartOffset);
+ LangOptions LangOpts;
+ Lexer TheLexer(StartLoc, LangOpts, Buffer->getBufferStart(),
+ Buffer->getBufferStart(), Buffer->getBufferEnd());
+
+ bool InPreprocessorDirective = false;
+ Token TheTok;
+ Token IfStartTok;
+ unsigned IfCount = 0;
+ unsigned Line = 0;
+
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+
+ if (InPreprocessorDirective) {
+ // If we've hit the end of the file, we're done.
+ if (TheTok.getKind() == tok::eof) {
+ InPreprocessorDirective = false;
+ break;
+ }
+
+ // If we haven't hit the end of the preprocessor directive, skip this
+ // token.
+ if (!TheTok.isAtStartOfLine())
+ continue;
+
+ // We've passed the end of the preprocessor directive, and will look
+ // at this token again below.
+ InPreprocessorDirective = false;
+ }
+
+ // Keep track of the # of lines in the preamble.
+ if (TheTok.isAtStartOfLine()) {
+ ++Line;
+
+ // If we were asked to limit the number of lines in the preamble,
+ // and we're about to exceed that limit, we're done.
+ if (MaxLines && Line >= MaxLines)
+ break;
+ }
+
+ // Comments are okay; skip over them.
+ if (TheTok.getKind() == tok::comment)
+ continue;
+
+ if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
+ // This is the start of a preprocessor directive.
+ Token HashTok = TheTok;
+ InPreprocessorDirective = true;
+
+      // Figure out which directive this is.  Since we're lexing raw tokens,
+ // we don't have an identifier table available. Instead, just look at
+ // the raw identifier to recognize and categorize preprocessor directives.
+ TheLexer.LexFromRawLexer(TheTok);
+ if (TheTok.getKind() == tok::identifier && !TheTok.needsCleaning()) {
+ const char *IdStart = Buffer->getBufferStart()
+ + TheTok.getLocation().getRawEncoding() - 1;
+ llvm::StringRef Keyword(IdStart, TheTok.getLength());
+ PreambleDirectiveKind PDK
+ = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
+ .Case("include", PDK_Skipped)
+ .Case("__include_macros", PDK_Skipped)
+ .Case("define", PDK_Skipped)
+ .Case("undef", PDK_Skipped)
+ .Case("line", PDK_Skipped)
+ .Case("error", PDK_Skipped)
+ .Case("pragma", PDK_Skipped)
+ .Case("import", PDK_Skipped)
+ .Case("include_next", PDK_Skipped)
+ .Case("warning", PDK_Skipped)
+ .Case("ident", PDK_Skipped)
+ .Case("sccs", PDK_Skipped)
+ .Case("assert", PDK_Skipped)
+ .Case("unassert", PDK_Skipped)
+ .Case("if", PDK_StartIf)
+ .Case("ifdef", PDK_StartIf)
+ .Case("ifndef", PDK_StartIf)
+ .Case("elif", PDK_Skipped)
+ .Case("else", PDK_Skipped)
+ .Case("endif", PDK_EndIf)
+ .Default(PDK_Unknown);
+
+ switch (PDK) {
+ case PDK_Skipped:
+ continue;
+
+ case PDK_StartIf:
+ if (IfCount == 0)
+ IfStartTok = HashTok;
+
+ ++IfCount;
+ continue;
+
+ case PDK_EndIf:
+ // Mismatched #endif. The preamble ends here.
+ if (IfCount == 0)
+ break;
+
+ --IfCount;
+ continue;
+
+ case PDK_Unknown:
+ // We don't know what this directive is; stop at the '#'.
+ break;
+ }
+ }
+
+ // We only end up here if we didn't recognize the preprocessor
+ // directive or it was one that can't occur in the preamble at this
+ // point. Roll back the current token to the location of the '#'.
+ InPreprocessorDirective = false;
+ TheTok = HashTok;
+ }
+
+ // We hit a token that we don't recognize as being in the
+ // "preprocessing only" part of the file, so we're no longer in
+ // the preamble.
+ break;
+ } while (true);
+
+ SourceLocation End = IfCount? IfStartTok.getLocation() : TheTok.getLocation();
+ return std::make_pair(End.getRawEncoding() - StartLoc.getRawEncoding(),
+ IfCount? IfStartTok.isAtStartOfLine()
+ : TheTok.isAtStartOfLine());
+}
+
//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//
@@ -476,7 +672,7 @@ static char DecodeTrigraphChar(const char *CP, Lexer *L) {
}
if (!L->isLexingRawMode())
- L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res;
+ L->Diag(CP-2, diag::trigraph_converted) << llvm::StringRef(&Res, 1);
return Res;
}
@@ -647,6 +843,14 @@ Slash:
// Helper methods for lexing.
//===----------------------------------------------------------------------===//
+/// \brief Routine that indiscriminately skips bytes in the source file.
+void Lexer::SkipBytes(unsigned Bytes, bool StartOfLine) {
+ BufferPtr += Bytes;
+ if (BufferPtr > BufferEnd)
+ BufferPtr = BufferEnd;
+ IsAtStartOfLine = StartOfLine;
+}
+
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
// Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
unsigned Size;
@@ -716,6 +920,16 @@ FinishIdentifier:
}
}
+/// isHexaLiteral - Return true if Start points to a hex constant, in
+/// Microsoft mode (where this is supposed to be several different tokens).
+static bool isHexaLiteral(const char *Start, const LangOptions &Features) {
+ unsigned Size;
+ char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, Features);
+ if (C1 != '0')
+ return false;
+ char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, Features);
+ return (C2 == 'x' || C2 == 'X');
+}
/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant. From[-1] is the first character lexed. Return the end of the
@@ -731,12 +945,16 @@ void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
}
// If we fell out, check for a sign, due to 1e+12. If we have one, continue.
- if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
- return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
+ // If we are in Microsoft mode, don't continue if the constant is hex.
+ // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
+ if (!Features.Microsoft || !isHexaLiteral(BufferPtr, Features))
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ }
// If we have a hex FP constant, continue.
if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p') &&
- (!PP || !PP->getLangOptions().CPlusPlus0x))
+ !Features.CPlusPlus0x)
return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
// Update the location of token as well as BufferPtr.
@@ -759,7 +977,9 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
- if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ if (C == 0 && PP && PP->isCodeCompletionFile(FileLoc))
+ PP->CodeCompleteNaturalLanguage();
+ else if (!isLexingRawMode() && !Features.AsmPreprocessor)
Diag(BufferPtr, diag::err_unterminated_string);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
@@ -836,7 +1056,9 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
C = getAndAdvanceChar(CurPtr, Result);
} else if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
- if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ if (C == 0 && PP && PP->isCodeCompletionFile(FileLoc))
+ PP->CodeCompleteNaturalLanguage();
+ else if (!isLexingRawMode() && !Features.AsmPreprocessor)
Diag(BufferPtr, diag::err_unterminated_char);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
@@ -980,7 +1202,13 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
}
}
- if (CurPtr == BufferEnd+1) { --CurPtr; break; }
+ if (CurPtr == BufferEnd+1) {
+ if (PP && PP->isCodeCompletionFile(FileLoc))
+ PP->CodeCompleteNaturalLanguage();
+
+ --CurPtr;
+ break;
+ }
} while (C != '\n' && C != '\r');
// Found but did not consume the newline. Notify comment handlers about the
@@ -1219,7 +1447,9 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
Diag(CurPtr-1, diag::warn_nested_block_comment);
}
} else if (C == 0 && CurPtr == BufferEnd+1) {
- if (!isLexingRawMode() && !PP->isCodeCompletionFile(FileLoc))
+ if (PP && PP->isCodeCompletionFile(FileLoc))
+ PP->CodeCompleteNaturalLanguage();
+ else if (!isLexingRawMode())
Diag(BufferPtr, diag::err_unterminated_block_comment);
// Note: the user probably forgot a */. We could continue immediately
// after the /*, but this would involve lexing a lot of what really is the
@@ -1305,6 +1535,11 @@ std::string Lexer::ReadToEndOfLine() {
// Next, lex the character, which should handle the EOM transition.
Lex(Tmp);
+ if (Tmp.is(tok::code_completion)) {
+ if (PP && PP->getCodeCompletionHandler())
+ PP->getCodeCompletionHandler()->CodeCompleteNaturalLanguage();
+ Lex(Tmp);
+ }
assert(Tmp.is(tok::eom) && "Unexpected token!");
// Finally, we're done, return the string we found.
@@ -1318,6 +1553,22 @@ std::string Lexer::ReadToEndOfLine() {
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
+ // Check if we are performing code completion.
+ if (PP && PP->isCodeCompletionFile(FileLoc)) {
+ // We're at the end of the file, but we've been asked to consider the
+ // end of the file to be a code-completion token. Return the
+ // code-completion token.
+ Result.startToken();
+ FormTokenWithChars(Result, CurPtr, tok::code_completion);
+
+ // Only do the eof -> code_completion translation once.
+ PP->SetCodeCompletionPoint(0, 0, 0);
+
+ // Silence any diagnostics that occur once we hit the code-completion point.
+ PP->getDiagnostics().setSuppressAllDiagnostics(true);
+ return true;
+ }
+
// If we hit the end of the file while parsing a preprocessor directive,
// end the preprocessor directive first. The next token returned will
// then be the end of file.
@@ -1340,29 +1591,14 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
FormTokenWithChars(Result, BufferEnd, tok::eof);
return true;
}
-
- // Otherwise, check if we are code-completing, then issue diagnostics for
- // unterminated #if and missing newline.
-
- if (PP && PP->isCodeCompletionFile(FileLoc)) {
- // We're at the end of the file, but we've been asked to consider the
- // end of the file to be a code-completion token. Return the
- // code-completion token.
- Result.startToken();
- FormTokenWithChars(Result, CurPtr, tok::code_completion);
-
- // Only do the eof -> code_completion translation once.
- PP->SetCodeCompletionPoint(0, 0, 0);
-
- // Silence any diagnostics that occur once we hit the code-completion point.
- PP->getDiagnostics().setSuppressAllDiagnostics(true);
- return true;
- }
+ // Issue diagnostics for unterminated #if and missing newline.
+
// If we are in a #if directive, emit an error.
while (!ConditionalStack.empty()) {
- PP->Diag(ConditionalStack.back().IfLoc,
- diag::err_pp_unterminated_conditional);
+ if (!PP->isCodeCompletionFile(FileLoc))
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
ConditionalStack.pop_back();
}
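
The two Lexer entry points added above are easiest to understand from the caller's side. Below is a minimal sketch, not part of the patch, assuming ComputePreamble() is a static member (its definition uses no instance state) and that the caller already owns a MemoryBuffer for the main file; ReportPreambleSize is an invented name used only for illustration.

#include "clang/Lex/Lexer.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

// Report how much of a file the preprocessor-only "preamble" covers (the
// initial run of comments and preprocessor directives), and whether that
// end point falls at the start of a line.
static void ReportPreambleSize(const llvm::MemoryBuffer *MainFileBuffer) {
  // MaxLines == 0 imposes no line limit, matching the check in ComputePreamble.
  std::pair<unsigned, bool> Preamble =
      clang::Lexer::ComputePreamble(MainFileBuffer, /*MaxLines=*/0);
  llvm::errs() << "preamble ends at offset " << Preamble.first
               << (Preamble.second ? " (at the start of a line)\n" : "\n");
}

This pairs with the new Lexer::SkipBytes() above: the Preprocessor.cpp hunks later in this patch skip a previously computed preamble when entering the main file, which is what allows a precompiled preamble to stand in for that region.
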
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index b8fd3ce..fb543d0 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -170,6 +170,7 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
char *&ResultBuf, bool &HadError,
SourceLocation Loc, Preprocessor &PP,
+ bool wide,
bool Complain) {
// FIXME: Add a warning - UCN's are only valid in C++ & C99.
// FIXME: Handle wide strings.
@@ -190,6 +191,7 @@ static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
UTF32 UcnVal = 0;
unsigned short UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
+ unsigned short UcnLenSave = UcnLen;
for (; ThisTokBuf != ThisTokEnd && UcnLen; ++ThisTokBuf, UcnLen--) {
int CharVal = HexDigitValue(ThisTokBuf[0]);
if (CharVal == -1) break;
@@ -214,6 +216,17 @@ static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
HadError = 1;
return;
}
+ if (wide) {
+ (void)UcnLenSave;
+ assert(UcnLenSave == 4 &&
+ "ProcessUCNEscape - only ucn length of 4 supported");
+ // little endian assumed.
+ *ResultBuf++ = (UcnVal & 0x000000FF);
+ *ResultBuf++ = (UcnVal & 0x0000FF00) >> 8;
+ *ResultBuf++ = (UcnVal & 0x00FF0000) >> 16;
+ *ResultBuf++ = (UcnVal & 0xFF000000) >> 24;
+ return;
+ }
// Now that we've parsed/checked the UCN, we convert from UTF32->UTF8.
// The conversion below was inspired by:
// http://www.unicode.org/Public/PROGRAMS/CVTUTF/ConvertUTF.c
@@ -323,7 +336,7 @@ NumericLiteralParser(const char *begin, const char *end,
// Done.
} else if (isxdigit(*s) && !(*s == 'e' || *s == 'E')) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
- diag::err_invalid_decimal_digit) << std::string(s, s+1);
+ diag::err_invalid_decimal_digit) << llvm::StringRef(s, 1);
hadError = true;
return;
} else if (*s == '.') {
@@ -439,7 +452,7 @@ NumericLiteralParser(const char *begin, const char *end,
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
isFPConstant ? diag::err_invalid_suffix_float_constant :
diag::err_invalid_suffix_integer_constant)
- << std::string(SuffixBegin, ThisTokEnd);
+ << llvm::StringRef(SuffixBegin, ThisTokEnd-SuffixBegin);
hadError = true;
return;
}
@@ -510,7 +523,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// Done.
} else if (isxdigit(*s)) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_binary_digit) << std::string(s, s+1);
+ diag::err_invalid_binary_digit) << llvm::StringRef(s, 1);
hadError = true;
}
// Other suffixes will be diagnosed by the caller.
@@ -540,7 +553,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// the code is using an incorrect base.
if (isxdigit(*s) && *s != 'e' && *s != 'E') {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_octal_digit) << std::string(s, s+1);
+ diag::err_invalid_octal_digit) << llvm::StringRef(s, 1);
hadError = true;
return;
}
@@ -830,12 +843,14 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
}
const char *ThisTokEnd = ThisTokBuf+ThisTokLen-1; // Skip end quote.
-
+ bool wide = false;
// TODO: Input character set mapping support.
// Skip L marker for wide strings.
- if (ThisTokBuf[0] == 'L')
+ if (ThisTokBuf[0] == 'L') {
+ wide = true;
++ThisTokBuf;
+ }
assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?");
++ThisTokBuf;
@@ -880,7 +895,8 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
// Is this a Universal Character Name escape?
if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
ProcessUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr,
- hadError, StringToks[i].getLocation(), PP, Complain);
+ hadError, StringToks[i].getLocation(), PP, wide,
+ Complain);
continue;
}
// Otherwise, this is a non-UCN escape character. Process it.
@@ -911,6 +927,20 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
hadError = 1;
return;
}
+ } else if (Complain) {
+ // Complain if this string literal has too many characters.
+ unsigned MaxChars = PP.getLangOptions().CPlusPlus? 65536
+ : PP.getLangOptions().C99 ? 4095
+ : 509;
+
+ if (GetNumStringChars() > MaxChars)
+ PP.Diag(StringToks[0].getLocation(), diag::ext_string_too_long)
+ << GetNumStringChars() << MaxChars
+ << (PP.getLangOptions().CPlusPlus? 2
+ : PP.getLangOptions().C99 ? 1
+ : 0)
+ << SourceRange(StringToks[0].getLocation(),
+ StringToks[NumStringToks-1].getLocation());
}
}
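
The wide-string branch added to ProcessUCNEscape above writes the UCN value out as four little-endian bytes. A self-contained sketch of that packing, using U+00E9 as the example value; the snippet is illustrative only, shares no code with the patch, and assumes the same little-endian, 4-byte wide-character layout the new code assumes.

#include <cstdio>

int main() {
  unsigned UcnVal = 0x00E9;               // value of \u00E9 in a wide literal
  unsigned char Bytes[4];
  Bytes[0] =  UcnVal & 0x000000FF;        // 0xE9, least significant byte first
  Bytes[1] = (UcnVal & 0x0000FF00) >> 8;  // 0x00
  Bytes[2] = (UcnVal & 0x00FF0000) >> 16; // 0x00
  Bytes[3] = (UcnVal & 0xFF000000) >> 24; // 0x00
  std::printf("%02x %02x %02x %02x\n", Bytes[0], Bytes[1], Bytes[2], Bytes[3]);
  return 0;
}
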
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index fda884c..c6d0934 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -20,13 +20,32 @@ MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
IsC99Varargs = false;
IsGNUVarargs = false;
IsBuiltinMacro = false;
+ IsFromAST = false;
IsDisabled = false;
IsUsed = true;
+ IsAllowRedefinitionsWithoutWarning = false;
ArgumentList = 0;
NumArguments = 0;
}
+MacroInfo::MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator) {
+ Location = MI.Location;
+ EndLocation = MI.EndLocation;
+ ReplacementTokens = MI.ReplacementTokens;
+ IsFunctionLike = MI.IsFunctionLike;
+ IsC99Varargs = MI.IsC99Varargs;
+ IsGNUVarargs = MI.IsGNUVarargs;
+ IsBuiltinMacro = MI.IsBuiltinMacro;
+ IsFromAST = MI.IsFromAST;
+ IsDisabled = MI.IsDisabled;
+ IsUsed = MI.IsUsed;
+ IsAllowRedefinitionsWithoutWarning = MI.IsAllowRedefinitionsWithoutWarning;
+ ArgumentList = 0;
+ NumArguments = 0;
+ setArgumentList(MI.ArgumentList, MI.NumArguments, PPAllocator);
+}
+
/// isIdenticalTo - Return true if the specified macro definition is equal to
/// this macro in spelling, arguments, and whitespace. This is used to emit
/// duplicate definition warnings. This implements the rules in C99 6.10.3.
diff --git a/lib/Lex/Makefile b/lib/Lex/Makefile
index 938b8d5..d80fb55 100644
--- a/lib/Lex/Makefile
+++ b/lib/Lex/Makefile
@@ -15,7 +15,6 @@ CLANG_LEVEL := ../..
include $(CLANG_LEVEL)/../../Makefile.config
LIBRARYNAME := clangLex
-BUILD_ARCHIVE = 1
ifeq ($(ARCH),PowerPC)
CXX.Flags += -maltivec
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 417724b..8da7def 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -16,6 +16,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/APInt.h"
@@ -25,7 +26,7 @@ using namespace clang;
// Utility Methods for Preprocessor Directive Handling.
//===----------------------------------------------------------------------===//
-MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
+MacroInfo *Preprocessor::AllocateMacroInfo() {
MacroInfo *MI;
if (!MICache.empty()) {
@@ -33,15 +34,26 @@ MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
MICache.pop_back();
} else
MI = (MacroInfo*) BP.Allocate<MacroInfo>();
+ return MI;
+}
+
+MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
+ MacroInfo *MI = AllocateMacroInfo();
new (MI) MacroInfo(L);
return MI;
}
+MacroInfo *Preprocessor::CloneMacroInfo(const MacroInfo &MacroToClone) {
+ MacroInfo *MI = AllocateMacroInfo();
+ new (MI) MacroInfo(MacroToClone, BP);
+ return MI;
+}
+
/// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
/// be reused for allocating new MacroInfo objects.
-void Preprocessor::ReleaseMacroInfo(MacroInfo* MI) {
+void Preprocessor::ReleaseMacroInfo(MacroInfo *MI) {
MICache.push_back(MI);
- MI->FreeArgumentList(BP);
+ MI->FreeArgumentList();
}
@@ -63,6 +75,13 @@ void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
// Read the token, don't allow macro expansion on it.
LexUnexpandedToken(MacroNameTok);
+ if (MacroNameTok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteMacroName(isDefineUndef == 1);
+ LexUnexpandedToken(MacroNameTok);
+ return;
+ }
+
// Missing macro name?
if (MacroNameTok.is(tok::eom)) {
Diag(MacroNameTok, diag::err_pp_missing_macro_name);
@@ -166,13 +185,20 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
while (1) {
CurLexer->Lex(Tok);
+ if (Tok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteInConditionalExclusion();
+ continue;
+ }
+
// If this is the end of the buffer, we have an error.
if (Tok.is(tok::eof)) {
// Emit errors for each unterminated conditional on the stack, including
// the current one.
while (!CurPPLexer->ConditionalStack.empty()) {
- Diag(CurPPLexer->ConditionalStack.back().IfLoc,
- diag::err_pp_unterminated_conditional);
+ if (!isCodeCompletionFile(Tok.getLocation()))
+ Diag(CurPPLexer->ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
CurPPLexer->ConditionalStack.pop_back();
}
@@ -510,7 +536,11 @@ TryAgain:
// Handle stuff like "# /*foo*/ define X" in -E -C mode.
LexUnexpandedToken(Result);
goto TryAgain;
-
+ case tok::code_completion:
+ if (CodeComplete)
+ CodeComplete->CodeCompleteDirective(
+ CurPPLexer->getConditionalStackDepth() > 0);
+ return;
case tok::numeric_constant: // # 7 GNU line marker directive.
if (getLangOptions().AsmPreprocessor)
break; // # 4 is not a preprocessor directive in .S files.
@@ -1445,15 +1475,15 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
if (!OtherMI->isUsed())
Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
- // Macros must be identical. This means all tokes and whitespace
+ // Macros must be identical. This means all tokens and whitespace
// separation must be the same. C99 6.10.3.2.
- if (!MI->isIdenticalTo(*OtherMI, *this)) {
+ if (!OtherMI->isAllowRedefinitionsWithoutWarning() &&
+ !MI->isIdenticalTo(*OtherMI, *this)) {
Diag(MI->getDefinitionLoc(), diag::ext_pp_macro_redef)
<< MacroNameTok.getIdentifierInfo();
Diag(OtherMI->getDefinitionLoc(), diag::note_previous_definition);
}
}
-
ReleaseMacroInfo(OtherMI);
}
@@ -1490,7 +1520,8 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) {
// If the callbacks want to know, tell them about the macro #undef.
if (Callbacks)
- Callbacks->MacroUndefined(MacroNameTok.getIdentifierInfo(), MI);
+ Callbacks->MacroUndefined(MacroNameTok.getLocation(),
+ MacroNameTok.getIdentifierInfo(), MI);
// Free macro definition.
ReleaseMacroInfo(MI);
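
The directive-parsing changes above route several new hooks through the CodeCompletionHandler interface. A minimal sketch of a handler that only reports which hook fired; the override signatures are inferred from the call sites in this patch, and the class name LoggingCompletionHandler is invented for illustration.

#include "clang/Lex/CodeCompletionHandler.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class LoggingCompletionHandler : public clang::CodeCompletionHandler {
public:
  // Called when the completion point follows a '#' introducing a directive.
  virtual void CodeCompleteDirective(bool InConditional) {
    llvm::errs() << "completing a directive name, in conditional: "
                 << InConditional << "\n";
  }
  // Called where a macro name is expected (after #define, #undef, defined).
  virtual void CodeCompleteMacroName(bool IsDefinition) {
    llvm::errs() << (IsDefinition ? "completing a macro being defined\n"
                                  : "completing a macro reference\n");
  }
  // Called when the completion point lies inside a skipped conditional block.
  virtual void CodeCompleteInConditionalExclusion() {
    llvm::errs() << "completing inside an excluded #if region\n";
  }
};
} // end anonymous namespace

A handler like this would be attached to the Preprocessor through whatever setter pairs with the getCodeCompletionHandler() accessor used elsewhere in the patch.
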
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 756ce27..163e869 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -19,11 +19,14 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LexDiagnostic.h"
#include "llvm/ADT/APSInt.h"
using namespace clang;
+namespace {
+
/// PPValue - Represents the value of a subexpression of a preprocessor
/// conditional and the source range covered by it.
class PPValue {
@@ -47,6 +50,8 @@ public:
void setEnd(SourceLocation L) { Range.setEnd(L); }
};
+}
+
static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
Token &PeekTok, bool ValueLive,
Preprocessor &PP);
@@ -88,6 +93,12 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
PP.LexUnexpandedToken(PeekTok);
}
+ if (PeekTok.is(tok::code_completion)) {
+ if (PP.getCodeCompletionHandler())
+ PP.getCodeCompletionHandler()->CodeCompleteMacroName(false);
+ PP.LexUnexpandedToken(PeekTok);
+ }
+
// If we don't have a pp-identifier now, this is an error.
if ((II = PeekTok.getIdentifierInfo()) == 0) {
PP.Diag(PeekTok, diag::err_pp_defined_requires_identifier);
@@ -138,6 +149,12 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
bool ValueLive, Preprocessor &PP) {
DT.State = DefinedTracker::Unknown;
+ if (PeekTok.is(tok::code_completion)) {
+ if (PP.getCodeCompletionHandler())
+ PP.getCodeCompletionHandler()->CodeCompletePreprocessorExpression();
+ PP.LexUnexpandedToken(PeekTok);
+ }
+
// If this token's spelling is a pp-identifier, check to see if it is
// 'defined' or if it is a macro. Note that we check here because many
// keywords are pp-identifiers, so we can't check the kind.
@@ -693,7 +710,7 @@ EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Peek ahead one token.
Token Tok;
Lex(Tok);
-
+
// C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
unsigned BitWidth = getTargetInfo().getIntMaxTWidth();
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index ebf606e..9015c27 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
@@ -71,6 +72,12 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
Ident__has_include = RegisterBuiltinMacro(*this, "__has_include");
Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
+
+ // Microsoft Extensions.
+ if (Features.Microsoft)
+ Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
+ else
+ Ident__pragma = 0;
}
/// isTrivialSingleTokenExpansion - Return true if MI, which has a single token
@@ -323,6 +330,13 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
// an argument value in a macro could expand to ',' or '(' or ')'.
LexUnexpandedToken(Tok);
+ if (Tok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteMacroArgument(MacroName.getIdentifierInfo(),
+ MI, NumActuals);
+ LexUnexpandedToken(Tok);
+ }
+
if (Tok.is(tok::eof) || Tok.is(tok::eom)) { // "#if f(<eof>" & "#if f(\n"
Diag(MacroName, diag::err_unterm_macro_invoc);
// Do not lose the EOF/EOM. Return it to the client.
@@ -506,6 +520,10 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("cxx_static_assert", LangOpts.CPlusPlus0x)
.Case("objc_nonfragile_abi", LangOpts.ObjCNonFragileABI)
.Case("objc_weak_class", LangOpts.ObjCNonFragileABI)
+ .Case("ownership_holds", true)
+ .Case("ownership_returns", true)
+ .Case("ownership_takes", true)
+ .Case("cxx_inline_namespaces", true)
//.Case("cxx_concepts", false)
//.Case("cxx_lambdas", false)
//.Case("cxx_nullptr", false)
@@ -630,10 +648,12 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
IdentifierInfo *II = Tok.getIdentifierInfo();
assert(II && "Can't be a macro without id info!");
- // If this is an _Pragma directive, expand it, invoke the pragma handler, then
- // lex the token after it.
+ // If this is an _Pragma or Microsoft __pragma directive, expand it,
+ // invoke the pragma handler, then lex the token after it.
if (II == Ident_Pragma)
return Handle_Pragma(Tok);
+ else if (II == Ident__pragma) // in non-MS mode this is null
+ return HandleMicrosoft__pragma(Tok);
++NumBuiltinMacroExpanded;
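
For context on the Ident__pragma registration above: unlike the C99 _Pragma operator, whose operand is a string literal, Microsoft's __pragma wraps bare tokens in parentheses, which is why a separate HandleMicrosoft__pragma path is needed. An assumed source snippet showing the two spellings side by side (the __pragma lines are valid only when Microsoft extensions are enabled):

_Pragma("pack(push, 1)")   /* C99 operator: pragma text inside a string literal */
__pragma(pack(push, 1))    /* Microsoft spelling handled by the new code path   */

struct Packed { char c; int i; };

_Pragma("pack(pop)")
__pragma(pack(pop))
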
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index 3b949d0..63b4823 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -101,16 +101,15 @@ LexNextToken:
// Save the end-of-file token.
EofToken = Tok;
+ // Save 'PP' to 'PPCache' as LexEndOfFile can delete 'this'.
Preprocessor *PPCache = PP;
assert(!ParsingPreprocessorDirective);
assert(!LexingRawMode);
-
- // FIXME: Issue diagnostics similar to Lexer.
- if (PP->HandleEndOfFile(Tok, false))
+
+ if (LexEndOfFile(Tok))
return;
- assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
return PPCache->Lex(Tok);
}
@@ -134,6 +133,29 @@ LexNextToken:
MIOpt.ReadToken();
}
+bool PTHLexer::LexEndOfFile(Token &Result) {
+ // If we hit the end of the file while parsing a preprocessor directive,
+ // end the preprocessor directive first. The next token returned will
+ // then be the end of file.
+ if (ParsingPreprocessorDirective) {
+ ParsingPreprocessorDirective = false; // Done parsing the "line".
+ return true; // Have a token.
+ }
+
+ assert(!LexingRawMode);
+
+ // If we are in a #if directive, emit an error.
+ while (!ConditionalStack.empty()) {
+ if (!PP->isCodeCompletionFile(FileStartLoc))
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ ConditionalStack.pop_back();
+ }
+
+ // Finally, let the preprocessor handle this.
+ return PP->HandleEndOfFile(Result);
+}
+
// FIXME: We can just grab the last token instead of storing a copy
// into EofToken.
void PTHLexer::getEOF(Token& Tok) {
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 7bf4094..a7b289e 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -16,9 +16,12 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace clang;
@@ -166,6 +169,62 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
--e;
}
}
+
+ Handle_Pragma(StrVal, PragmaLoc, RParenLoc);
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+/// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text
+/// is not enclosed within a string literal.
+void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
+ // Remember the pragma token location.
+ SourceLocation PragmaLoc = Tok.getLocation();
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ // Get the tokens enclosed within the __pragma().
+ llvm::SmallVector<Token, 32> PragmaToks;
+ int NumParens = 0;
+ Lex(Tok);
+ while (Tok.isNot(tok::eof)) {
+ if (Tok.is(tok::l_paren))
+ NumParens++;
+ else if (Tok.is(tok::r_paren) && NumParens-- == 0)
+ break;
+ PragmaToks.push_back(Tok);
+ Lex(Tok);
+ }
+
+ if (Tok.is(tok::eof)) {
+ Diag(PragmaLoc, diag::err_unterminated___pragma);
+ return;
+ }
+
+ // Build the pragma string.
+ std::string StrVal = " ";
+ for (llvm::SmallVector<Token, 32>::iterator I =
+ PragmaToks.begin(), E = PragmaToks.end(); I != E; ++I) {
+ StrVal += getSpelling(*I);
+ }
+
+ SourceLocation RParenLoc = Tok.getLocation();
+
+ Handle_Pragma(StrVal, PragmaLoc, RParenLoc);
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+void Preprocessor::Handle_Pragma(const std::string &StrVal,
+ SourceLocation PragmaLoc,
+ SourceLocation RParenLoc) {
// Plop the string (including the newline and trailing null) into a buffer
// where we can lex it.
@@ -183,9 +242,6 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
// With everything set up, lex this as a #pragma directive.
HandlePragmaDirective();
-
- // Finally, return whatever came after the pragma directive.
- return Lex(Tok);
}
@@ -328,7 +384,9 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
Lex(DependencyTok);
}
- Message.erase(Message.end()-1);
+ // Remove the trailing ' ' if present.
+ if (!Message.empty())
+ Message.erase(Message.end()-1);
Diag(FilenameTok, diag::pp_out_of_date_dependency) << Message;
}
}
@@ -483,6 +541,109 @@ void Preprocessor::HandlePragmaMessage(Token &Tok) {
Callbacks->PragmaMessage(MessageLoc, MessageString);
}
+/// ParsePragmaPushOrPopMacro - Handle parsing of pragma push_macro/pop_macro.
+/// Return the IdentifierInfo* associated with the macro to push or pop.
+IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
+ // Remember the pragma token location.
+ Token PragmaTok = Tok;
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return 0;
+ }
+
+ // Read the macro name string.
+ Lex(Tok);
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return 0;
+ }
+
+ // Remember the macro string.
+ std::string StrVal = getSpelling(Tok);
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return 0;
+ }
+
+ assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
+ "Invalid string token!");
+
+ // Create a Token from the string.
+ Token MacroTok;
+ MacroTok.startToken();
+ MacroTok.setKind(tok::identifier);
+ CreateString(&StrVal[1], StrVal.size() - 2, MacroTok);
+
+  // Look up and return the IdentifierInfo for the macro name.
+ return LookUpIdentifierInfo(MacroTok);
+}
+
+/// HandlePragmaPushMacro - Handle #pragma push_macro.
+/// The syntax is:
+/// #pragma push_macro("macro")
+void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
+ // Parse the pragma directive and get the macro IdentifierInfo*.
+ IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PushMacroTok);
+ if (!IdentInfo) return;
+
+ // Get the MacroInfo associated with IdentInfo.
+ MacroInfo *MI = getMacroInfo(IdentInfo);
+
+ MacroInfo *MacroCopyToPush = 0;
+ if (MI) {
+ // Make a clone of MI.
+ MacroCopyToPush = CloneMacroInfo(*MI);
+
+ // Allow the original MacroInfo to be redefined later.
+ MI->setIsAllowRedefinitionsWithoutWarning(true);
+ }
+
+ // Push the cloned MacroInfo so we can retrieve it later.
+ PragmaPushMacroInfo[IdentInfo].push_back(MacroCopyToPush);
+}
+
+/// HandlePragmaPopMacro - Handle #pragma pop_macro.
+/// The syntax is:
+/// #pragma pop_macro("macro")
+void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
+ SourceLocation MessageLoc = PopMacroTok.getLocation();
+
+ // Parse the pragma directive and get the macro IdentifierInfo*.
+ IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PopMacroTok);
+ if (!IdentInfo) return;
+
+ // Find the vector<MacroInfo*> associated with the macro.
+ llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> >::iterator iter =
+ PragmaPushMacroInfo.find(IdentInfo);
+ if (iter != PragmaPushMacroInfo.end()) {
+ // Release the MacroInfo currently associated with IdentInfo.
+ MacroInfo *CurrentMI = getMacroInfo(IdentInfo);
+ if (CurrentMI) ReleaseMacroInfo(CurrentMI);
+
+ // Get the MacroInfo we want to reinstall.
+ MacroInfo *MacroToReInstall = iter->second.back();
+
+ // Reinstall the previously pushed macro.
+ setMacroInfo(IdentInfo, MacroToReInstall);
+
+ // Pop PragmaPushMacroInfo stack.
+ iter->second.pop_back();
+ if (iter->second.size() == 0)
+ PragmaPushMacroInfo.erase(iter);
+ } else {
+ Diag(MessageLoc, diag::warn_pragma_pop_macro_no_push)
+ << IdentInfo->getName();
+ }
+}
/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
/// If 'Namespace' is non-null, then it is a token required to exist on the
@@ -582,24 +743,51 @@ struct PragmaDependencyHandler : public PragmaHandler {
}
};
+struct PragmaDebugHandler : public PragmaHandler {
+ PragmaDebugHandler() : PragmaHandler("__debug") {}
+ virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (II->isStr("assert")) {
+ assert(0 && "This is an assertion!");
+ } else if (II->isStr("crash")) {
+ *(volatile int*) 0x11 = 0;
+ } else if (II->isStr("llvm_fatal_error")) {
+ llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
+ } else if (II->isStr("llvm_unreachable")) {
+ llvm_unreachable("#pragma clang __debug llvm_unreachable");
+ } else if (II->isStr("overflow_stack")) {
+ DebugOverflowStack();
+ } else if (II->isStr("handle_crash")) {
+ llvm::CrashRecoveryContext *CRC =llvm::CrashRecoveryContext::GetCurrent();
+ if (CRC)
+ CRC->HandleCrash();
+ } else {
+ PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
+ << II->getName();
+ }
+ }
+
+ void DebugOverflowStack() {
+ DebugOverflowStack();
+ }
+};
+
/// PragmaDiagnosticHandler - e.g. '#pragma GCC diagnostic ignored "-Wformat"'
-/// Since clang's diagnostic supports extended functionality beyond GCC's
-/// the constructor takes a clangMode flag to tell it whether or not to allow
-/// clang's extended functionality, or whether to reject it.
struct PragmaDiagnosticHandler : public PragmaHandler {
-private:
- const bool ClangMode;
public:
- explicit PragmaDiagnosticHandler(const bool clangMode)
- : PragmaHandler("diagnostic"), ClangMode(clangMode) {}
-
+ explicit PragmaDiagnosticHandler() : PragmaHandler("diagnostic") {}
virtual void HandlePragma(Preprocessor &PP, Token &DiagToken) {
Token Tok;
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::identifier)) {
- unsigned Diag = ClangMode ? diag::warn_pragma_diagnostic_clang_invalid
- : diag::warn_pragma_diagnostic_gcc_invalid;
- PP.Diag(Tok, Diag);
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
return;
}
IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -613,22 +801,16 @@ public:
Map = diag::MAP_IGNORE;
else if (II->isStr("fatal"))
Map = diag::MAP_FATAL;
- else if (ClangMode) {
- if (II->isStr("pop")) {
- if (!PP.getDiagnostics().popMappings())
- PP.Diag(Tok, diag::warn_pragma_diagnostic_clang_cannot_ppp);
- return;
- }
-
- if (II->isStr("push")) {
- PP.getDiagnostics().pushMappings();
- return;
- }
-
- PP.Diag(Tok, diag::warn_pragma_diagnostic_clang_invalid);
+ else if (II->isStr("pop")) {
+ if (!PP.getDiagnostics().popMappings())
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
+
+ return;
+ } else if (II->isStr("push")) {
+ PP.getDiagnostics().pushMappings();
return;
} else {
- PP.Diag(Tok, diag::warn_pragma_diagnostic_gcc_invalid);
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
return;
}
@@ -660,9 +842,7 @@ public:
if (Literal.hadError)
return;
if (Literal.Pascal) {
- unsigned Diag = ClangMode ? diag::warn_pragma_diagnostic_clang_invalid
- : diag::warn_pragma_diagnostic_gcc_invalid;
- PP.Diag(Tok, Diag);
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
return;
}
@@ -699,6 +879,25 @@ struct PragmaMessageHandler : public PragmaHandler {
}
};
+/// PragmaPushMacroHandler - "#pragma push_macro" saves the value of the
+/// macro on the top of the stack.
+struct PragmaPushMacroHandler : public PragmaHandler {
+ PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
+ virtual void HandlePragma(Preprocessor &PP, Token &PushMacroTok) {
+ PP.HandlePragmaPushMacro(PushMacroTok);
+ }
+};
+
+
+/// PragmaPopMacroHandler - "#pragma pop_macro" sets the value of the
+/// macro to the value on the top of the stack.
+struct PragmaPopMacroHandler : public PragmaHandler {
+ PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
+ virtual void HandlePragma(Preprocessor &PP, Token &PopMacroTok) {
+ PP.HandlePragmaPopMacro(PopMacroTok);
+ }
+};
+
// Pragma STDC implementations.
enum STDCSetting {
@@ -780,17 +979,20 @@ struct PragmaSTDC_UnknownHandler : public PragmaHandler {
void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler(new PragmaOnceHandler());
AddPragmaHandler(new PragmaMarkHandler());
+ AddPragmaHandler(new PragmaPushMacroHandler());
+ AddPragmaHandler(new PragmaPopMacroHandler());
// #pragma GCC ...
AddPragmaHandler("GCC", new PragmaPoisonHandler());
AddPragmaHandler("GCC", new PragmaSystemHeaderHandler());
AddPragmaHandler("GCC", new PragmaDependencyHandler());
- AddPragmaHandler("GCC", new PragmaDiagnosticHandler(false));
+ AddPragmaHandler("GCC", new PragmaDiagnosticHandler());
// #pragma clang ...
AddPragmaHandler("clang", new PragmaPoisonHandler());
AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("clang", new PragmaDebugHandler());
AddPragmaHandler("clang", new PragmaDependencyHandler());
- AddPragmaHandler("clang", new PragmaDiagnosticHandler(true));
+ AddPragmaHandler("clang", new PragmaDiagnosticHandler());
AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler());
AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index 6966c38..c446d96 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -118,7 +118,8 @@ void PreprocessingRecord::MacroDefined(const IdentifierInfo *II,
PreprocessedEntities.push_back(Def);
}
-void PreprocessingRecord::MacroUndefined(const IdentifierInfo *II,
+void PreprocessingRecord::MacroUndefined(SourceLocation Loc,
+ const IdentifierInfo *II,
const MacroInfo *MI) {
llvm::DenseMap<const MacroInfo *, MacroDefinition *>::iterator Pos
= MacroDefinitions.find(MI);
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 51f7293..5160acf 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -34,6 +34,7 @@
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/ScratchBuffer.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -53,8 +54,9 @@ Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
bool OwnsHeaders)
: Diags(&diags), Features(opts), Target(target),FileMgr(Headers.getFileMgr()),
SourceMgr(SM), HeaderInfo(Headers), ExternalSource(0),
- Identifiers(opts, IILookup), BuiltinInfo(Target), CodeCompletionFile(0),
- CurPPLexer(0), CurDirLookup(0), Callbacks(0), MacroArgCache(0), Record(0) {
+ Identifiers(opts, IILookup), BuiltinInfo(Target), CodeComplete(0),
+ CodeCompletionFile(0), SkipMainFilePreamble(0, true), CurPPLexer(0),
+ CurDirLookup(0), Callbacks(0), MacroArgCache(0), Record(0) {
ScratchBuf = new ScratchBuffer(SourceMgr);
CounterValue = 0; // __COUNTER__ starts at 0.
OwnsHeaderSearch = OwnsHeaders;
@@ -110,7 +112,7 @@ Preprocessor::~Preprocessor() {
// will be released when the BumpPtrAllocator 'BP' object gets
// destroyed. We still need to run the dtor, however, to free
// memory allocated by MacroInfo.
- I->second->Destroy(BP);
+ I->second->Destroy();
I->first->setHasMacroDefinition(false);
}
for (std::vector<MacroInfo*>::iterator I = MICache.begin(),
@@ -119,7 +121,7 @@ Preprocessor::~Preprocessor() {
// will be released when the BumpPtrAllocator 'BP' object gets
// destroyed. We still need to run the dtor, however, to free
// memory allocated by MacroInfo.
- (*I)->Destroy(BP);
+ (*I)->Destroy();
}
// Free any cached macro expanders.
@@ -163,7 +165,7 @@ void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
llvm::errs() << " [ExpandDisabled]";
if (Tok.needsCleaning()) {
const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
- llvm::errs() << " [UnClean='" << std::string(Start, Start+Tok.getLength())
+ llvm::errs() << " [UnClean='" << llvm::StringRef(Start, Tok.getLength())
<< "']";
}
@@ -282,6 +284,13 @@ bool Preprocessor::isCodeCompletionFile(SourceLocation FileLoc) const {
== CodeCompletionFile;
}
+void Preprocessor::CodeCompleteNaturalLanguage() {
+ SetCodeCompletionPoint(0, 0, 0);
+ getDiagnostics().setSuppressAllDiagnostics(true);
+ if (CodeComplete)
+ CodeComplete->CodeCompleteNaturalLanguage();
+}
+
//===----------------------------------------------------------------------===//
// Token Spelling
//===----------------------------------------------------------------------===//
@@ -508,6 +517,12 @@ void Preprocessor::EnterMainSourceFile() {
// Enter the main file source buffer.
EnterSourceFile(MainFileID, 0, SourceLocation());
+ // If we've been asked to skip bytes in the main file (e.g., as part of a
+ // precompiled preamble), do so now.
+ if (SkipMainFilePreamble.first > 0)
+ CurLexer->SkipBytes(SkipMainFilePreamble.first,
+ SkipMainFilePreamble.second);
+
// Tell the header info that the main file was entered. If the file is later
// #imported, it won't be re-entered.
if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
@@ -516,7 +531,7 @@ void Preprocessor::EnterMainSourceFile() {
// Preprocess Predefines to populate the initial preprocessor state.
llvm::MemoryBuffer *SB =
llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
- assert(SB && "Cannot fail to create predefined source buffer");
+ assert(SB && "Cannot create predefined source buffer");
FileID FID = SourceMgr.createFileIDForMemBuffer(SB);
assert(!FID.isInvalid() && "Could not create FileID for predefines?");
@@ -639,6 +654,8 @@ bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
CommentHandler::~CommentHandler() { }
+CodeCompletionHandler::~CodeCompletionHandler() { }
+
void Preprocessor::createPreprocessingRecord() {
if (Record)
return;
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index 56bb073..94719b0 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -268,6 +268,13 @@ void TokenLexer::ExpandFunctionArguments() {
// Remove the paste operator, report use of the extension.
PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
ResultToks.pop_back();
+
+ // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
+ // then removal of the comma should produce a placemarker token (in C99
+ // terms) which we model by popping off the previous ##, giving us a plain
+ // "X" when __VA_ARGS__ is empty.
+ if (!ResultToks.empty() && ResultToks.back().is(tok::hashhash))
+ ResultToks.pop_back();
}
continue;
}
@@ -478,7 +485,7 @@ bool TokenLexer::PasteTokens(Token &Tok) {
return true;
}
- // Do not emit the warning when preprocessing assembler code.
+ // Do not emit the error when preprocessing assembler code.
if (!PP.getLangOptions().AsmPreprocessor) {
// Explicitly convert the token location to have proper instantiation
// information so that the user knows where it came from.
@@ -486,8 +493,13 @@ bool TokenLexer::PasteTokens(Token &Tok) {
SourceLocation Loc =
SM.createInstantiationLoc(PasteOpLoc, InstantiateLocStart,
InstantiateLocEnd, 2);
- PP.Diag(Loc, diag::err_pp_bad_paste)
- << std::string(Buffer.begin(), Buffer.end());
+ // If we're in microsoft extensions mode, downgrade this from a hard
+ // error to a warning that defaults to an error. This allows
+ // disabling it.
+ PP.Diag(Loc,
+ PP.getLangOptions().Microsoft ? diag::err_pp_bad_paste_ms
+ : diag::err_pp_bad_paste)
+ << Buffer.str();
}
// Do not consume the RHS.
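
The comma-paste change in ExpandFunctionArguments above is easiest to see with a concrete macro; the example below is assumed rather than taken from the patch or its tests.

#define ID(X, ...) X ## , ## __VA_ARGS__

ID(a)        /* __VA_ARGS__ is empty: the GNU extension deletes the ',', and with
                this change the preceding '##' is popped as well, so the expansion
                is simply 'a' instead of an invalid paste */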