author	rdivacky <rdivacky@FreeBSD.org>	2009-10-14 18:03:49 +0000
committer	rdivacky <rdivacky@FreeBSD.org>	2009-10-14 18:03:49 +0000
commit	9092c3e0fa01f3139b016d05d267a89e3b07747a (patch)
tree	137ebebcae16fb0ce7ab4af456992bbd8d22fced	/lib/Lex/PTHLexer.cpp
parent	4981926bf654fe5a2c3893f24ca44106b217e71e (diff)
Update clang to r84119.
Diffstat (limited to 'lib/Lex/PTHLexer.cpp')
-rw-r--r--	lib/Lex/PTHLexer.cpp	220
1 file changed, 110 insertions, 110 deletions
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index 916bdef..36ace8b 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -37,7 +37,7 @@ PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
const unsigned char *ppcond, PTHManager &PM)
: PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), LastHashTokPtr(0),
PPCond(ppcond), CurPPCondPtr(ppcond), PTHMgr(PM) {
-
+
FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
}
@@ -47,25 +47,25 @@ LexNextToken:
//===--------------------------------------==//
// Read the raw token data.
//===--------------------------------------==//
-
+
// Shadow CurPtr into an automatic variable.
- const unsigned char *CurPtrShadow = CurPtr;
+ const unsigned char *CurPtrShadow = CurPtr;
// Read in the data for the token.
unsigned Word0 = ReadLE32(CurPtrShadow);
uint32_t IdentifierID = ReadLE32(CurPtrShadow);
uint32_t FileOffset = ReadLE32(CurPtrShadow);
-
+
tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
uint32_t Len = Word0 >> 16;
CurPtr = CurPtrShadow;
-
+
//===--------------------------------------==//
// Construct the token itself.
//===--------------------------------------==//
-
+
Tok.startToken();
Tok.setKind(TKind);
Tok.setFlag(TFlags);
@@ -80,57 +80,57 @@ LexNextToken:
else if (IdentifierID) {
MIOpt.ReadToken();
IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
-
+
Tok.setIdentifierInfo(II);
-
+
// Change the kind of this identifier to the appropriate token kind, e.g.
// turning "for" into a keyword.
Tok.setKind(II->getTokenID());
-
+
if (II->isHandleIdentifierCase())
PP->HandleIdentifier(Tok);
return;
}
-
+
//===--------------------------------------==//
// Process the token.
//===--------------------------------------==//
-#if 0
+#if 0
SourceManager& SM = PP->getSourceManager();
- llvm::cerr << SM.getFileEntryForID(FileID)->getName()
+ llvm::errs() << SM.getFileEntryForID(FileID)->getName()
<< ':' << SM.getLogicalLineNumber(Tok.getLocation())
<< ':' << SM.getLogicalColumnNumber(Tok.getLocation())
<< '\n';
-#endif
+#endif
if (TKind == tok::eof) {
// Save the end-of-file token.
EofToken = Tok;
-
+
Preprocessor *PPCache = PP;
-
+
assert(!ParsingPreprocessorDirective);
assert(!LexingRawMode);
-
+
// FIXME: Issue diagnostics similar to Lexer.
if (PP->HandleEndOfFile(Tok, false))
return;
-
+
assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
return PPCache->Lex(Tok);
}
-
+
if (TKind == tok::hash && Tok.isAtStartOfLine()) {
LastHashTokPtr = CurPtr - DISK_TOKEN_SIZE;
assert(!LexingRawMode);
PP->HandleDirective(Tok);
-
+
if (PP->isCurrentLexer(this))
goto LexNextToken;
-
+
return PP->Lex(Tok);
}
-
+
if (TKind == tok::eom) {
assert(ParsingPreprocessorDirective);
ParsingPreprocessorDirective = false;
@@ -154,7 +154,7 @@ void PTHLexer::DiscardToEndOfLine() {
// We assume that if the preprocessor wishes to discard to the end of
// the line that it also means to end the current preprocessor directive.
ParsingPreprocessorDirective = false;
-
+
// Skip tokens by only peeking at their token kind and the flags.
// We don't need to actually reconstruct full tokens from the token buffer.
// This saves some copies and it also reduces IdentifierInfo* lookup.
@@ -163,7 +163,7 @@ void PTHLexer::DiscardToEndOfLine() {
// Read the token kind. Are we at the end of the file?
tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
if (x == tok::eof) break;
-
+
// Read the token flags. Are we at the start of the next line?
Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
if (y & Token::StartOfLine) break;
@@ -171,7 +171,7 @@ void PTHLexer::DiscardToEndOfLine() {
// Skip to the next token.
p += DISK_TOKEN_SIZE;
}
-
+
CurPtr = p;
}
@@ -179,18 +179,18 @@ void PTHLexer::DiscardToEndOfLine() {
bool PTHLexer::SkipBlock() {
assert(CurPPCondPtr && "No cached PP conditional information.");
assert(LastHashTokPtr && "No known '#' token.");
-
+
const unsigned char* HashEntryI = 0;
- uint32_t Offset;
+ uint32_t Offset;
uint32_t TableIdx;
-
+
do {
// Read the token offset from the side-table.
Offset = ReadLE32(CurPPCondPtr);
-
- // Read the target table index from the side-table.
+
+ // Read the target table index from the side-table.
TableIdx = ReadLE32(CurPPCondPtr);
-
+
// Compute the actual memory address of the '#' token data for this entry.
HashEntryI = TokBuf + Offset;
@@ -208,7 +208,7 @@ bool PTHLexer::SkipBlock() {
// Read where we should jump to.
uint32_t TmpOffset = ReadLE32(NextPPCondPtr);
const unsigned char* HashEntryJ = TokBuf + TmpOffset;
-
+
if (HashEntryJ <= LastHashTokPtr) {
// Jump directly to the next entry in the side table.
HashEntryI = HashEntryJ;
@@ -218,23 +218,23 @@ bool PTHLexer::SkipBlock() {
}
}
}
- while (HashEntryI < LastHashTokPtr);
+ while (HashEntryI < LastHashTokPtr);
assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
assert(TableIdx && "No jumping from #endifs.");
-
+
// Update our side-table iterator.
const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
assert(NextPPCondPtr >= CurPPCondPtr);
CurPPCondPtr = NextPPCondPtr;
-
+
// Read where we should jump to.
HashEntryI = TokBuf + ReadLE32(NextPPCondPtr);
uint32_t NextIdx = ReadLE32(NextPPCondPtr);
-
+
// By construction NextIdx will be zero if this is a #endif. This is useful
// to know to obviate lexing another token.
bool isEndif = NextIdx == 0;
-
+
// This case can occur when we see something like this:
//
// #if ...
@@ -243,7 +243,7 @@ bool PTHLexer::SkipBlock() {
//
// If we are skipping the first #if block it will be the case that CurPtr
// already points 'elif'. Just return.
-
+
if (CurPtr > HashEntryI) {
assert(CurPtr == HashEntryI + DISK_TOKEN_SIZE);
// Did we reach a #endif? If so, go ahead and consume that token as well.
@@ -251,13 +251,13 @@ bool PTHLexer::SkipBlock() {
CurPtr += DISK_TOKEN_SIZE*2;
else
LastHashTokPtr = HashEntryI;
-
+
return isEndif;
}
// Otherwise, we need to advance. Update CurPtr to point to the '#' token.
CurPtr = HashEntryI;
-
+
// Update the location of the last observed '#'. This is useful if we
// are skipping multiple blocks.
LastHashTokPtr = CurPtr;
@@ -265,7 +265,7 @@ bool PTHLexer::SkipBlock() {
// Skip the '#' token.
assert(((tok::TokenKind)*CurPtr) == tok::hash);
CurPtr += DISK_TOKEN_SIZE;
-
+
// Did we reach a #endif? If so, go ahead and consume that token as well.
if (isEndif) { CurPtr += DISK_TOKEN_SIZE*2; }
@@ -297,12 +297,12 @@ class VISIBILITY_HIDDEN PTHFileData {
public:
PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
: TokenOff(tokenOff), PPCondOff(ppCondOff) {}
-
- uint32_t getTokenOffset() const { return TokenOff; }
- uint32_t getPPCondOffset() const { return PPCondOff; }
+
+ uint32_t getTokenOffset() const { return TokenOff; }
+ uint32_t getPPCondOffset() const { return PPCondOff; }
};
-
-
+
+
class VISIBILITY_HIDDEN PTHFileLookupCommonTrait {
public:
typedef std::pair<unsigned char, const char*> internal_key_type;
@@ -310,84 +310,84 @@ public:
static unsigned ComputeHash(internal_key_type x) {
return BernsteinHash(x.second);
}
-
+
static std::pair<unsigned, unsigned>
ReadKeyDataLength(const unsigned char*& d) {
unsigned keyLen = (unsigned) ReadUnalignedLE16(d);
unsigned dataLen = (unsigned) *(d++);
return std::make_pair(keyLen, dataLen);
}
-
+
static internal_key_type ReadKey(const unsigned char* d, unsigned) {
unsigned char k = *(d++); // Read the entry kind.
return std::make_pair(k, (const char*) d);
}
};
-
+
class VISIBILITY_HIDDEN PTHFileLookupTrait : public PTHFileLookupCommonTrait {
public:
typedef const FileEntry* external_key_type;
typedef PTHFileData data_type;
-
+
static internal_key_type GetInternalKey(const FileEntry* FE) {
return std::make_pair((unsigned char) 0x1, FE->getName());
}
static bool EqualKey(internal_key_type a, internal_key_type b) {
return a.first == b.first && strcmp(a.second, b.second) == 0;
- }
-
- static PTHFileData ReadData(const internal_key_type& k,
- const unsigned char* d, unsigned) {
+ }
+
+ static PTHFileData ReadData(const internal_key_type& k,
+ const unsigned char* d, unsigned) {
assert(k.first == 0x1 && "Only file lookups can match!");
uint32_t x = ::ReadUnalignedLE32(d);
uint32_t y = ::ReadUnalignedLE32(d);
- return PTHFileData(x, y);
+ return PTHFileData(x, y);
}
};
class VISIBILITY_HIDDEN PTHStringLookupTrait {
public:
- typedef uint32_t
+ typedef uint32_t
data_type;
typedef const std::pair<const char*, unsigned>
external_key_type;
typedef external_key_type internal_key_type;
-
+
static bool EqualKey(const internal_key_type& a,
const internal_key_type& b) {
return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
: false;
}
-
+
static unsigned ComputeHash(const internal_key_type& a) {
return BernsteinHash(a.first, a.second);
}
-
+
// This hopefully will just get inlined and removed by the optimizer.
static const internal_key_type&
GetInternalKey(const external_key_type& x) { return x; }
-
+
static std::pair<unsigned, unsigned>
ReadKeyDataLength(const unsigned char*& d) {
return std::make_pair((unsigned) ReadUnalignedLE16(d), sizeof(uint32_t));
}
-
+
static std::pair<const char*, unsigned>
ReadKey(const unsigned char* d, unsigned n) {
assert(n >= 2 && d[n-1] == '\0');
return std::make_pair((const char*) d, n-1);
}
-
+
static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
unsigned) {
return ::ReadUnalignedLE32(d);
}
};
-
-} // end anonymous namespace
+
+} // end anonymous namespace
typedef OnDiskChainedHashTable<PTHFileLookupTrait> PTHFileLookup;
typedef OnDiskChainedHashTable<PTHStringLookupTrait> PTHStringIdLookup;
@@ -398,7 +398,7 @@ typedef OnDiskChainedHashTable<PTHStringLookupTrait> PTHStringIdLookup;
PTHManager::PTHManager(const llvm::MemoryBuffer* buf, void* fileLookup,
const unsigned char* idDataTable,
- IdentifierInfo** perIDCache,
+ IdentifierInfo** perIDCache,
void* stringIdLookup, unsigned numIds,
const unsigned char* spellingBase,
const char* originalSourceFile)
@@ -416,7 +416,7 @@ PTHManager::~PTHManager() {
static void InvalidPTH(Diagnostic *Diags, Diagnostic::Level level,
const char* Msg = 0) {
- if (!Diags) return;
+ if (!Diags) return;
if (!Msg) Msg = "Invalid or corrupted PTH file";
unsigned DiagID = Diags->getCustomDiagID(level, Msg);
Diags->Report(FullSourceLoc(), DiagID);
@@ -427,7 +427,7 @@ PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
// Memory map the PTH file.
llvm::OwningPtr<llvm::MemoryBuffer>
File(llvm::MemoryBuffer::getFile(file.c_str()));
-
+
if (!File) {
if (Diags) {
unsigned DiagID = Diags->getCustomDiagID(level,
@@ -437,7 +437,7 @@ PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
return 0;
}
-
+
// Get the buffer ranges and check if there are at least three 32-bit
// words at the end of the file.
const unsigned char* BufBeg = (unsigned char*)File->getBufferStart();
@@ -449,54 +449,54 @@ PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
InvalidPTH(Diags, level);
return 0;
}
-
+
// Read the PTH version.
const unsigned char *p = BufBeg + (sizeof("cfe-pth") - 1);
unsigned Version = ReadLE32(p);
-
+
if (Version != PTHManager::Version) {
InvalidPTH(Diags, level,
- Version < PTHManager::Version
+ Version < PTHManager::Version
? "PTH file uses an older PTH format that is no longer supported"
: "PTH file uses a newer PTH format that cannot be read");
return 0;
}
- // Compute the address of the index table at the end of the PTH file.
+ // Compute the address of the index table at the end of the PTH file.
const unsigned char *PrologueOffset = p;
-
+
if (PrologueOffset >= BufEnd) {
InvalidPTH(Diags, level);
return 0;
}
-
+
// Construct the file lookup table. This will be used for mapping from
// FileEntry*'s to cached tokens.
const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
const unsigned char* FileTable = BufBeg + ReadLE32(FileTableOffset);
-
+
if (!(FileTable > BufBeg && FileTable < BufEnd)) {
InvalidPTH(Diags, level);
return 0; // FIXME: Proper error diagnostic?
}
-
+
llvm::OwningPtr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
-
+
// Warn if the PTH file is empty. We still want to create a PTHManager
// as the PTH could be used with -include-pth.
if (FL->isEmpty())
InvalidPTH(Diags, level, "PTH file contains no cached source data");
-
+
// Get the location of the table mapping from persistent ids to the
// data needed to reconstruct identifiers.
const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
const unsigned char* IData = BufBeg + ReadLE32(IDTableOffset);
-
+
if (!(IData >= BufBeg && IData < BufEnd)) {
InvalidPTH(Diags, level);
return 0;
}
-
+
// Get the location of the hashtable mapping between strings and
// persistent IDs.
const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
@@ -508,7 +508,7 @@ PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
llvm::OwningPtr<PTHStringIdLookup> SL(PTHStringIdLookup::Create(StringIdTable,
BufBeg));
-
+
// Get the location of the spelling cache.
const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
const unsigned char* spellingBase = BufBeg + ReadLE32(spellingBaseOffset);
@@ -516,19 +516,19 @@ PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
InvalidPTH(Diags, level);
return 0;
}
-
+
// Get the number of IdentifierInfos and pre-allocate the identifier cache.
uint32_t NumIds = ReadLE32(IData);
-
+
// Pre-allocate the peristent ID -> IdentifierInfo* cache. We use calloc()
// so that we in the best case only zero out memory once when the OS returns
// us new pages.
IdentifierInfo** PerIDCache = 0;
-
+
if (NumIds) {
- PerIDCache = (IdentifierInfo**)calloc(NumIds, sizeof(*PerIDCache));
+ PerIDCache = (IdentifierInfo**)calloc(NumIds, sizeof(*PerIDCache));
if (!PerIDCache) {
- InvalidPTH(Diags, level,
+ InvalidPTH(Diags, level,
"Could not allocate memory for processing PTH file");
return 0;
}
@@ -537,8 +537,8 @@ PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
// Compute the address of the original source file.
const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
unsigned len = ReadUnalignedLE16(originalSourceBase);
- if (!len) originalSourceBase = 0;
-
+ if (!len) originalSourceBase = 0;
+
// Create the new PTHManager.
return new PTHManager(File.take(), FL.take(), IData, PerIDCache,
SL.take(), NumIds, spellingBase,
@@ -551,7 +551,7 @@ IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
const unsigned char* IDData =
(const unsigned char*)Buf->getBufferStart() + ReadLE32(TableEntry);
assert(IDData < (const unsigned char*)Buf->getBufferEnd());
-
+
// Allocate the object.
std::pair<IdentifierInfo,const unsigned char*> *Mem =
Alloc.Allocate<std::pair<IdentifierInfo,const unsigned char*> >();
@@ -559,7 +559,7 @@ IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
Mem->second = IDData;
assert(IDData[0] != '\0');
IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
-
+
// Store the new IdentifierInfo in the cache.
PerIDCache[PersistentID] = II;
assert(II->getName() && II->getName()[0] != '\0');
@@ -584,18 +584,18 @@ PTHLexer *PTHManager::CreateLexer(FileID FID) {
const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
if (!FE)
return 0;
-
+
// Lookup the FileEntry object in our file lookup data structure. It will
// return a variant that indicates whether or not there is an offset within
// the PTH file that contains cached tokens.
PTHFileLookup& PFL = *((PTHFileLookup*)FileLookup);
PTHFileLookup::iterator I = PFL.find(FE);
-
+
if (I == PFL.end()) // No tokens available?
return 0;
-
- const PTHFileData& FileData = *I;
-
+
+ const PTHFileData& FileData = *I;
+
const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
// Compute the offset of the token data within the buffer.
const unsigned char* data = BufStart + FileData.getTokenOffset();
@@ -604,9 +604,9 @@ PTHLexer *PTHManager::CreateLexer(FileID FID) {
const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
uint32_t Len = ReadLE32(ppcond);
if (Len == 0) ppcond = 0;
-
+
assert(PP && "No preprocessor set yet!");
- return new PTHLexer(*PP, FID, data, ppcond, *this);
+ return new PTHLexer(*PP, FID, data, ppcond, *this);
}
//===----------------------------------------------------------------------===//
@@ -622,19 +622,19 @@ public:
const mode_t mode;
const time_t mtime;
const off_t size;
-
+
PTHStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
- : hasStat(true), ino(i), dev(d), mode(mo), mtime(m), size(s) {}
-
+ : hasStat(true), ino(i), dev(d), mode(mo), mtime(m), size(s) {}
+
PTHStatData()
: hasStat(false), ino(0), dev(0), mode(0), mtime(0), size(0) {}
};
-
+
class VISIBILITY_HIDDEN PTHStatLookupTrait : public PTHFileLookupCommonTrait {
public:
typedef const char* external_key_type; // const char*
typedef PTHStatData data_type;
-
+
static internal_key_type GetInternalKey(const char *path) {
// The key 'kind' doesn't matter here because it is ignored in EqualKey.
return std::make_pair((unsigned char) 0x0, path);
@@ -644,17 +644,17 @@ public:
// When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
// just the paths.
return strcmp(a.second, b.second) == 0;
- }
-
+ }
+
static data_type ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
-
+ unsigned) {
+
if (k.first /* File or Directory */) {
if (k.first == 0x1 /* File */) d += 4 * 2; // Skip the first 2 words.
ino_t ino = (ino_t) ReadUnalignedLE32(d);
dev_t dev = (dev_t) ReadUnalignedLE32(d);
mode_t mode = (mode_t) ReadUnalignedLE16(d);
- time_t mtime = (time_t) ReadUnalignedLE64(d);
+ time_t mtime = (time_t) ReadUnalignedLE64(d);
return data_type(ino, dev, mode, mtime, (off_t) ReadUnalignedLE64(d));
}
@@ -667,22 +667,22 @@ class VISIBILITY_HIDDEN PTHStatCache : public StatSysCallCache {
typedef OnDiskChainedHashTable<PTHStatLookupTrait> CacheTy;
CacheTy Cache;
-public:
+public:
PTHStatCache(PTHFileLookup &FL) :
Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
FL.getBase()) {}
~PTHStatCache() {}
-
+
int stat(const char *path, struct stat *buf) {
// Do the lookup for the file's data in the PTH file.
CacheTy::iterator I = Cache.find(path);
// If we don't get a hit in the PTH file just forward to 'stat'.
if (I == Cache.end()) return ::stat(path, buf);
-
+
const PTHStatData& Data = *I;
-
+
if (!Data.hasStat)
return 1;