Diffstat (limited to 'lib/Lex')
-rw-r--r--  lib/Lex/Lexer.cpp            | 42
-rw-r--r--  lib/Lex/LiteralSupport.cpp   |  2
-rw-r--r--  lib/Lex/PPDirectives.cpp     | 20
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp |  4
-rw-r--r--  lib/Lex/Pragma.cpp           |  3
-rw-r--r--  lib/Lex/Preprocessor.cpp     | 31
6 files changed, 51 insertions(+), 51 deletions(-)
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 0a74b26..afd1ba8 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -210,6 +210,7 @@ void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
}
}
+static bool isWhitespace(unsigned char c);
/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
@@ -231,6 +232,9 @@ unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
std::pair<const char *,const char *> Buffer = SM.getBufferData(LocInfo.first);
const char *StrData = Buffer.first+LocInfo.second;
+ if (isWhitespace(StrData[0]))
+ return 0;
+
// Create a lexer starting at the beginning of this token.
Lexer TheLexer(Loc, LangOpts, Buffer.first, StrData, Buffer.second);
TheLexer.SetCommentRetentionState(true);
@@ -902,8 +906,10 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
// SkipBCPLComment - We have just read the // characters from input. Skip until
// we find the newline character that terminates the comment. Then update
-/// BufferPtr and return. If we're in KeepCommentMode, this will form the token
-/// and return true.
+/// BufferPtr and return.
+///
+/// If we're in KeepCommentMode or any CommentHandler has inserted
+/// some tokens, this will store the first token and return true.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
// If BCPL comments aren't explicitly enabled for this language, emit an
// extension warning.
@@ -980,9 +986,12 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
} while (C != '\n' && C != '\r');
// Found but did not consume the newline.
- if (PP)
- PP->HandleComment(SourceRange(getSourceLocation(BufferPtr),
- getSourceLocation(CurPtr)));
+ if (PP && PP->HandleComment(Result,
+ SourceRange(getSourceLocation(BufferPtr),
+ getSourceLocation(CurPtr)))) {
+ BufferPtr = CurPtr;
+ return true; // A token has to be returned.
+ }
// If we are returning comments as tokens, return this comment as a token.
if (inKeepCommentMode())
@@ -1108,8 +1117,8 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
///
-/// If KeepCommentMode is enabled, this forms a token from the comment and
-/// returns true.
+/// If we're in KeepCommentMode or any CommentHandler has inserted
+/// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
// Scan one character past where we should, looking for a '/' character. Once
// we find it, check to see if it was preceded by a *. This common
@@ -1226,9 +1235,12 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
C = *CurPtr++;
}
- if (PP)
- PP->HandleComment(SourceRange(getSourceLocation(BufferPtr),
- getSourceLocation(CurPtr)));
+ if (PP && PP->HandleComment(Result,
+ SourceRange(getSourceLocation(BufferPtr),
+ getSourceLocation(CurPtr)))) {
+ BufferPtr = CurPtr;
+ return true; // A token has to be returned.
+ }
// If we are returning comments as tokens, return this comment as a token.
if (inKeepCommentMode()) {
@@ -1606,10 +1618,12 @@ LexNextToken:
// too (without going through the big switch stmt).
if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
Features.BCPLComment) {
- SkipBCPLComment(Result, CurPtr+2);
+ if (SkipBCPLComment(Result, CurPtr+2))
+ return; // There is a token to return.
goto SkipIgnoredUnits;
} else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
- SkipBlockComment(Result, CurPtr+2);
+ if (SkipBlockComment(Result, CurPtr+2))
+ return; // There is a token to return.
goto SkipIgnoredUnits;
} else if (isHorizontalWhitespace(*CurPtr)) {
goto SkipHorizontalWhitespace;
@@ -1795,7 +1809,7 @@ LexNextToken:
if (Features.BCPLComment ||
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') {
if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
- return; // KeepCommentMode
+ return; // There is a token to return.
// It is common for the tokens immediately after a // comment to be
// whitespace (indentation for the next line). Instead of going through
@@ -1806,7 +1820,7 @@ LexNextToken:
if (Char == '*') { // /**/ comment.
if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
- return; // KeepCommentMode
+ return; // There is a token to return.
goto LexNextToken; // GCC isn't tail call eliminating.
}
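Note: the Lexer.cpp hunks above let a skipped comment produce a token: when Preprocessor::HandleComment reports that a CommentHandler inserted tokens, the lexer stores the first one in Result and returns. Below is a minimal sketch of such a handler, assuming CommentHandler::HandleComment now returns bool as the call site in Preprocessor.cpp suggests; the class name and the commented-out EnterTokenStream call are illustrative only, not part of this commit.

  #include "clang/Lex/Preprocessor.h"

  // Hypothetical handler: recognizes certain comments, pushes replacement
  // tokens into the preprocessor, and reports that it did so.
  class AnnotationCommentHandler : public clang::CommentHandler {
  public:
    virtual bool HandleComment(clang::Preprocessor &PP,
                               clang::SourceRange Comment) {
      // Build one or more clang::Token objects for this comment (not shown),
      // then hand them to the preprocessor, for example:
      //   PP.EnterTokenStream(Toks, NumToks, /*DisableMacroExpansion=*/true,
      //                       /*OwnsTokens=*/false);
      return true; // tokens were inserted; the lexer must now return one
    }
  };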
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index 5cd5497..004e675 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -375,7 +375,7 @@ NumericLiteralParser(const char *begin, const char *end,
continue; // Success.
case 'i':
if (PP.getLangOptions().Microsoft) {
- if (isFPConstant || isUnsigned || isLong || isLongLong) break;
+ if (isFPConstant || isLong || isLongLong) break;
// Allow i8, i16, i32, i64, and i128.
if (s + 1 != ThisTokEnd) {
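Note: the LiteralSupport.cpp change no longer rejects the 'i' suffix when the literal is already marked unsigned, so a Microsoft width suffix can follow a 'u'. A sketch of source this appears to accept under the Microsoft language options; the values are illustrative.

  __int64          a = 123i64;                 // accepted before this change
  unsigned __int64 b = 0x8000000000000000ui64; // 'u' followed by 'i64': now accepted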
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index aa807f8..b0e784b 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -160,10 +160,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
CurPPLexer->LexingRawMode = true;
Token Tok;
while (1) {
- if (CurLexer)
- CurLexer->Lex(Tok);
- else
- CurPTHLexer->Lex(Tok);
+ CurLexer->Lex(Tok);
// If this is the end of the buffer, we have an error.
if (Tok.is(tok::eof)) {
@@ -405,7 +402,6 @@ void Preprocessor::PTHSkipExcludedConditionalBlock() {
/// return null on failure. isAngled indicates whether the file reference is
/// for system #include's or not (i.e. using <> instead of "").
const FileEntry *Preprocessor::LookupFile(llvm::StringRef Filename,
- SourceLocation FilenameTokLoc,
bool isAngled,
const DirectoryLookup *FromDir,
const DirectoryLookup *&CurDir) {
@@ -432,16 +428,7 @@ const FileEntry *Preprocessor::LookupFile(llvm::StringRef Filename,
CurDir = CurDirLookup;
const FileEntry *FE =
HeaderInfo.LookupFile(Filename, isAngled, FromDir, CurDir, CurFileEnt);
- if (FE) {
- // Warn about normal quoted #include from framework headers. Since
- // framework headers are published (both public and private ones) they
- // should not do relative searches, they should do an include with the
- // framework path included.
- if (!isAngled && CurDir && FilenameTokLoc.isValid() &&
- CurDir->isFramework() && CurDir == CurDirLookup)
- Diag(FilenameTokLoc, diag::warn_pp_relative_include_from_framework);
- return FE;
- }
+ if (FE) return FE;
// Otherwise, see if this is a subframework header. If so, this is relative
// to one of the headers on the #include stack. Walk the list of the current
@@ -1082,8 +1069,7 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
// Search include directories.
const DirectoryLookup *CurDir;
- const FileEntry *File = LookupFile(Filename, FilenameTok.getLocation(),
- isAngled, LookupFrom, CurDir);
+ const FileEntry *File = LookupFile(Filename, isAngled, LookupFrom, CurDir);
if (File == 0) {
Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
return;
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 3792782..13aeb88 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -567,9 +567,7 @@ static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
// Search include directories.
const DirectoryLookup *CurDir;
- const FileEntry *File = PP.LookupFile(Filename,
- SourceLocation(),// produce no warnings.
- isAngled, LookupFrom, CurDir);
+ const FileEntry *File = PP.LookupFile(Filename, isAngled, LookupFrom, CurDir);
// Get the result value. Result = true means the file exists.
Result = File != 0;
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 856b3bd..63b23b6 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -301,8 +301,7 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
// Search include directories for this file.
const DirectoryLookup *CurDir;
- const FileEntry *File = LookupFile(Filename, FilenameTok.getLocation(),
- isAngled, 0, CurDir);
+ const FileEntry *File = LookupFile(Filename, isAngled, 0, CurDir);
if (File == 0) {
Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
return;
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 26bb3a9..5689baa 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -429,21 +429,17 @@ SourceLocation Preprocessor::AdvanceToTokenCharacter(SourceLocation TokStart,
return TokStart.getFileLocWithOffset(PhysOffset);
}
-/// \brief Computes the source location just past the end of the
-/// token at this source location.
-///
-/// This routine can be used to produce a source location that
-/// points just past the end of the token referenced by \p Loc, and
-/// is generally used when a diagnostic needs to point just after a
-/// token where it expected something different that it received. If
-/// the returned source location would not be meaningful (e.g., if
-/// it points into a macro), this routine returns an invalid
-/// source location.
-SourceLocation Preprocessor::getLocForEndOfToken(SourceLocation Loc) {
+SourceLocation Preprocessor::getLocForEndOfToken(SourceLocation Loc,
+ unsigned Offset) {
if (Loc.isInvalid() || !Loc.isFileID())
return SourceLocation();
unsigned Len = Lexer::MeasureTokenLength(Loc, getSourceManager(), Features);
+ if (Len > Offset)
+ Len = Len - Offset;
+ else
+ return Loc;
+
return AdvanceToTokenCharacter(Loc, Len);
}
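Note: getLocForEndOfToken now takes an Offset that is subtracted from the measured token length, and the original location is returned if the token is not longer than Offset. A usage sketch, assuming the header (not part of this diff) gives Offset a default of 0; passing 1 is a typical way to point at a token's last character rather than just past it.

  // Just past the end of the token, as before.
  clang::SourceLocation End  = PP.getLocForEndOfToken(Tok.getLocation());
  // On the token's last character (end minus one).
  clang::SourceLocation Last = PP.getLocForEndOfToken(Tok.getLocation(), 1);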
@@ -583,11 +579,18 @@ void Preprocessor::RemoveCommentHandler(CommentHandler *Handler) {
CommentHandlers.erase(Pos);
}
-void Preprocessor::HandleComment(SourceRange Comment) {
+bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
+ bool AnyPendingTokens = false;
for (std::vector<CommentHandler *>::iterator H = CommentHandlers.begin(),
HEnd = CommentHandlers.end();
- H != HEnd; ++H)
- (*H)->HandleComment(*this, Comment);
+ H != HEnd; ++H) {
+ if ((*H)->HandleComment(*this, Comment))
+ AnyPendingTokens = true;
+ }
+ if (!AnyPendingTokens || getCommentRetentionState())
+ return false;
+ Lex(result);
+ return true;
}
CommentHandler::~CommentHandler() { }
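Note: because the new HandleComment checks getCommentRetentionState(), handler-inserted tokens are only lexed here when comments are not being kept as tokens; in comment-retention mode the comment token itself is still returned. A registration sketch using the existing handler API, with the hypothetical handler type from the Lexer.cpp note above:

  AnnotationCommentHandler Handler;
  PP.AddCommentHandler(&Handler);
  // ... lex/parse as usual; unregister before Handler is destroyed ...
  PP.RemoveCommentHandler(&Handler);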