Diffstat (limited to 'contrib/llvm/lib/Bitcode/Writer')
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp          |  14
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp      | 379
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp  |   4
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp    | 524
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h      |  39
5 files changed, 626 insertions(+), 334 deletions(-)
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp
index 3747122..7218ea0 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp
@@ -18,10 +18,10 @@ using namespace llvm;
/*===-- Operations on modules ---------------------------------------------===*/
int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path) {
- std::string ErrorInfo;
- raw_fd_ostream OS(Path, ErrorInfo, sys::fs::F_None);
+ std::error_code EC;
+ raw_fd_ostream OS(Path, EC, sys::fs::F_None);
- if (!ErrorInfo.empty())
+ if (EC)
return -1;
WriteBitcodeToFile(unwrap(M), OS);
@@ -39,3 +39,11 @@ int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int FileHandle) {
return LLVMWriteBitcodeToFD(M, FileHandle, true, false);
}
+
+LLVMMemoryBufferRef LLVMWriteBitcodeToMemoryBuffer(LLVMModuleRef M) {
+ std::string Data;
+ raw_string_ostream OS(Data);
+
+ WriteBitcodeToFile(unwrap(M), OS);
+ return wrap(MemoryBuffer::getMemBufferCopy(OS.str()).release());
+}
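
The hunk above adds a third C API entry point that serializes a module into an in-memory buffer instead of a file or file descriptor. A minimal usage sketch from the client side, assuming the declaration is exported through llvm-c/BitWriter.h (that header change is not part of this diff); the other calls are standard LLVM-C API:

  #include "llvm-c/BitWriter.h"
  #include "llvm-c/Core.h"
  #include <cstdio>

  int main() {
    LLVMModuleRef M = LLVMModuleCreateWithName("demo");

    // Serialize the (empty) module into a freshly allocated memory buffer.
    LLVMMemoryBufferRef Buf = LLVMWriteBitcodeToMemoryBuffer(M);
    std::printf("bitcode size: %zu bytes\n", LLVMGetBufferSize(Buf));

    // The buffer is an independent copy; both objects must be released.
    LLVMDisposeMemoryBuffer(Buf);
    LLVMDisposeModule(M);
    return 0;
  }
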
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index b2e4948..a96e866 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/UseListOrder.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -32,12 +33,6 @@
#include <map>
using namespace llvm;
-static cl::opt<bool>
-EnablePreserveUseListOrdering("enable-bc-uselist-preserve",
- cl::desc("Turn on experimental support for "
- "use-list order preservation."),
- cl::init(false), cl::Hidden);
-
/// These are manifest constants used by the bitcode writer. They do not need to
/// be kept in sync with the reader, but need to be consistent within this file.
enum {
@@ -482,17 +477,28 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
static unsigned getEncodedLinkage(const GlobalValue &GV) {
switch (GV.getLinkage()) {
- case GlobalValue::ExternalLinkage: return 0;
- case GlobalValue::WeakAnyLinkage: return 1;
- case GlobalValue::AppendingLinkage: return 2;
- case GlobalValue::InternalLinkage: return 3;
- case GlobalValue::LinkOnceAnyLinkage: return 4;
- case GlobalValue::ExternalWeakLinkage: return 7;
- case GlobalValue::CommonLinkage: return 8;
- case GlobalValue::PrivateLinkage: return 9;
- case GlobalValue::WeakODRLinkage: return 10;
- case GlobalValue::LinkOnceODRLinkage: return 11;
- case GlobalValue::AvailableExternallyLinkage: return 12;
+ case GlobalValue::ExternalLinkage:
+ return 0;
+ case GlobalValue::WeakAnyLinkage:
+ return 1;
+ case GlobalValue::AppendingLinkage:
+ return 2;
+ case GlobalValue::InternalLinkage:
+ return 3;
+ case GlobalValue::LinkOnceAnyLinkage:
+ return 4;
+ case GlobalValue::ExternalWeakLinkage:
+ return 7;
+ case GlobalValue::CommonLinkage:
+ return 8;
+ case GlobalValue::PrivateLinkage:
+ return 9;
+ case GlobalValue::WeakODRLinkage:
+ return 10;
+ case GlobalValue::LinkOnceODRLinkage:
+ return 11;
+ case GlobalValue::AvailableExternallyLinkage:
+ return 12;
}
llvm_unreachable("Invalid linkage");
}
@@ -675,7 +681,8 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
// Emit the function proto information.
for (const Function &F : *M) {
// FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
- // section, visibility, gc, unnamed_addr, prefix]
+ // section, visibility, gc, unnamed_addr, prologuedata,
+ // dllstorageclass, comdat, prefixdata]
Vals.push_back(VE.getTypeID(F.getType()));
Vals.push_back(F.getCallingConv());
Vals.push_back(F.isDeclaration());
@@ -686,10 +693,12 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
Vals.push_back(getEncodedVisibility(F));
Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0);
Vals.push_back(F.hasUnnamedAddr());
- Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
- : 0);
+ Vals.push_back(F.hasPrologueData() ? (VE.getValueID(F.getPrologueData()) + 1)
+ : 0);
Vals.push_back(getEncodedDLLStorageClass(F));
Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0);
+ Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
+ : 0);
unsigned AbbrevToUse = 0;
Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
@@ -715,18 +724,15 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
static uint64_t GetOptimizationFlags(const Value *V) {
uint64_t Flags = 0;
- if (const OverflowingBinaryOperator *OBO =
- dyn_cast<OverflowingBinaryOperator>(V)) {
+ if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V)) {
if (OBO->hasNoSignedWrap())
Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP;
if (OBO->hasNoUnsignedWrap())
Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP;
- } else if (const PossiblyExactOperator *PEO =
- dyn_cast<PossiblyExactOperator>(V)) {
+ } else if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V)) {
if (PEO->isExact())
Flags |= 1 << bitc::PEO_EXACT;
- } else if (const FPMathOperator *FPMO =
- dyn_cast<const FPMathOperator>(V)) {
+ } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) {
if (FPMO->hasUnsafeAlgebra())
Flags |= FastMathFlags::UnsafeAlgebra;
if (FPMO->hasNoNaNs())
@@ -742,89 +748,140 @@ static uint64_t GetOptimizationFlags(const Value *V) {
return Flags;
}
+static void WriteValueAsMetadata(const ValueAsMetadata *MD,
+ const ValueEnumerator &VE,
+ BitstreamWriter &Stream,
+ SmallVectorImpl<uint64_t> &Record) {
+ // Mimic an MDNode with a value as one operand.
+ Value *V = MD->getValue();
+ Record.push_back(VE.getTypeID(V->getType()));
+ Record.push_back(VE.getValueID(V));
+ Stream.EmitRecord(bitc::METADATA_VALUE, Record, 0);
+ Record.clear();
+}
+
static void WriteMDNode(const MDNode *N,
const ValueEnumerator &VE,
BitstreamWriter &Stream,
SmallVectorImpl<uint64_t> &Record) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (N->getOperand(i)) {
- Record.push_back(VE.getTypeID(N->getOperand(i)->getType()));
- Record.push_back(VE.getValueID(N->getOperand(i)));
- } else {
- Record.push_back(VE.getTypeID(Type::getVoidTy(N->getContext())));
+ Metadata *MD = N->getOperand(i);
+ if (!MD) {
Record.push_back(0);
+ continue;
}
+ assert(!isa<LocalAsMetadata>(MD) && "Unexpected function-local metadata");
+ Record.push_back(VE.getMetadataID(MD) + 1);
}
- unsigned MDCode = N->isFunctionLocal() ? bitc::METADATA_FN_NODE :
- bitc::METADATA_NODE;
- Stream.EmitRecord(MDCode, Record, 0);
+ Stream.EmitRecord(N->isDistinct() ? bitc::METADATA_DISTINCT_NODE
+ : bitc::METADATA_NODE,
+ Record);
+ Record.clear();
+}
+
+static void WriteMDLocation(const MDLocation *N, const ValueEnumerator &VE,
+ BitstreamWriter &Stream,
+ SmallVectorImpl<uint64_t> &Record,
+ unsigned Abbrev) {
+ Record.push_back(N->isDistinct());
+ Record.push_back(N->getLine());
+ Record.push_back(N->getColumn());
+ Record.push_back(VE.getMetadataID(N->getScope()));
+
+ // Always emit the inlined-at location, even though it's optional.
+ if (Metadata *InlinedAt = N->getInlinedAt())
+ Record.push_back(VE.getMetadataID(InlinedAt) + 1);
+ else
+ Record.push_back(0);
+
+ Stream.EmitRecord(bitc::METADATA_LOCATION, Record, Abbrev);
Record.clear();
}
static void WriteModuleMetadata(const Module *M,
const ValueEnumerator &VE,
BitstreamWriter &Stream) {
- const ValueEnumerator::ValueList &Vals = VE.getMDValues();
- bool StartedMetadataBlock = false;
+ const auto &MDs = VE.getMDs();
+ if (MDs.empty() && M->named_metadata_empty())
+ return;
+
+ Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
+
unsigned MDSAbbrev = 0;
- SmallVector<uint64_t, 64> Record;
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ if (VE.hasMDString()) {
+ // Abbrev for METADATA_STRING.
+ BitCodeAbbrev *Abbv = new BitCodeAbbrev();
+ Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRING));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+ MDSAbbrev = Stream.EmitAbbrev(Abbv);
+ }
- if (const MDNode *N = dyn_cast<MDNode>(Vals[i].first)) {
- if (!N->isFunctionLocal() || !N->getFunction()) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
- StartedMetadataBlock = true;
- }
- WriteMDNode(N, VE, Stream, Record);
- }
- } else if (const MDString *MDS = dyn_cast<MDString>(Vals[i].first)) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
-
- // Abbrev for METADATA_STRING.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRING));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
- MDSAbbrev = Stream.EmitAbbrev(Abbv);
- StartedMetadataBlock = true;
- }
+ unsigned LocAbbrev = 0;
+ if (VE.hasMDLocation()) {
+ // Abbrev for METADATA_LOCATION.
+ //
+ // Assume the column is usually under 128, and always output the inlined-at
+ // location (it's never more expensive than building an array size 1).
+ BitCodeAbbrev *Abbv = new BitCodeAbbrev();
+ Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_LOCATION));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ LocAbbrev = Stream.EmitAbbrev(Abbv);
+ }
- // Code: [strchar x N]
- Record.append(MDS->begin(), MDS->end());
+ unsigned NameAbbrev = 0;
+ if (!M->named_metadata_empty()) {
+ // Abbrev for METADATA_NAME.
+ BitCodeAbbrev *Abbv = new BitCodeAbbrev();
+ Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_NAME));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+ NameAbbrev = Stream.EmitAbbrev(Abbv);
+ }
- // Emit the finished record.
- Stream.EmitRecord(bitc::METADATA_STRING, Record, MDSAbbrev);
- Record.clear();
+ SmallVector<uint64_t, 64> Record;
+ for (const Metadata *MD : MDs) {
+ if (const MDLocation *Loc = dyn_cast<MDLocation>(MD)) {
+ WriteMDLocation(Loc, VE, Stream, Record, LocAbbrev);
+ continue;
+ }
+ if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+ WriteMDNode(N, VE, Stream, Record);
+ continue;
}
+ if (const auto *MDC = dyn_cast<ConstantAsMetadata>(MD)) {
+ WriteValueAsMetadata(MDC, VE, Stream, Record);
+ continue;
+ }
+ const MDString *MDS = cast<MDString>(MD);
+ // Code: [strchar x N]
+ Record.append(MDS->bytes_begin(), MDS->bytes_end());
+
+ // Emit the finished record.
+ Stream.EmitRecord(bitc::METADATA_STRING, Record, MDSAbbrev);
+ Record.clear();
}
// Write named metadata.
- for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
- E = M->named_metadata_end(); I != E; ++I) {
- const NamedMDNode *NMD = I;
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
- StartedMetadataBlock = true;
- }
-
+ for (const NamedMDNode &NMD : M->named_metadata()) {
// Write name.
- StringRef Str = NMD->getName();
- for (unsigned i = 0, e = Str.size(); i != e; ++i)
- Record.push_back(Str[i]);
- Stream.EmitRecord(bitc::METADATA_NAME, Record, 0/*TODO*/);
+ StringRef Str = NMD.getName();
+ Record.append(Str.bytes_begin(), Str.bytes_end());
+ Stream.EmitRecord(bitc::METADATA_NAME, Record, NameAbbrev);
Record.clear();
// Write named metadata operands.
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
- Record.push_back(VE.getValueID(NMD->getOperand(i)));
+ for (const MDNode *N : NMD.operands())
+ Record.push_back(VE.getMetadataID(N));
Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0);
Record.clear();
}
- if (StartedMetadataBlock)
- Stream.ExitBlock();
+ Stream.ExitBlock();
}
static void WriteFunctionLocalMetadata(const Function &F,
@@ -832,16 +889,16 @@ static void WriteFunctionLocalMetadata(const Function &F,
BitstreamWriter &Stream) {
bool StartedMetadataBlock = false;
SmallVector<uint64_t, 64> Record;
- const SmallVectorImpl<const MDNode *> &Vals = VE.getFunctionLocalMDValues();
- for (unsigned i = 0, e = Vals.size(); i != e; ++i)
- if (const MDNode *N = Vals[i])
- if (N->isFunctionLocal() && N->getFunction() == &F) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
- StartedMetadataBlock = true;
- }
- WriteMDNode(N, VE, Stream, Record);
- }
+ const SmallVectorImpl<const LocalAsMetadata *> &MDs =
+ VE.getFunctionLocalMDs();
+ for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
+ assert(MDs[i] && "Expected valid function-local metadata");
+ if (!StartedMetadataBlock) {
+ Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
+ StartedMetadataBlock = true;
+ }
+ WriteValueAsMetadata(MDs[i], VE, Stream, Record);
+ }
if (StartedMetadataBlock)
Stream.ExitBlock();
@@ -856,7 +913,7 @@ static void WriteMetadataAttachment(const Function &F,
// Write metadata attachments
// METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]]
- SmallVector<std::pair<unsigned, MDNode*>, 4> MDs;
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
@@ -871,7 +928,7 @@ static void WriteMetadataAttachment(const Function &F,
for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
Record.push_back(MDs[i].first);
- Record.push_back(VE.getValueID(MDs[i].second));
+ Record.push_back(VE.getMetadataID(MDs[i].second));
}
Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
Record.clear();
@@ -1607,6 +1664,39 @@ static void WriteValueSymbolTable(const ValueSymbolTable &VST,
Stream.ExitBlock();
}
+static void WriteUseList(ValueEnumerator &VE, UseListOrder &&Order,
+ BitstreamWriter &Stream) {
+ assert(Order.Shuffle.size() >= 2 && "Shuffle too small");
+ unsigned Code;
+ if (isa<BasicBlock>(Order.V))
+ Code = bitc::USELIST_CODE_BB;
+ else
+ Code = bitc::USELIST_CODE_DEFAULT;
+
+ SmallVector<uint64_t, 64> Record;
+ for (unsigned I : Order.Shuffle)
+ Record.push_back(I);
+ Record.push_back(VE.getValueID(Order.V));
+ Stream.EmitRecord(Code, Record);
+}
+
+static void WriteUseListBlock(const Function *F, ValueEnumerator &VE,
+ BitstreamWriter &Stream) {
+ auto hasMore = [&]() {
+ return !VE.UseListOrders.empty() && VE.UseListOrders.back().F == F;
+ };
+ if (!hasMore())
+ // Nothing to do.
+ return;
+
+ Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3);
+ while (hasMore()) {
+ WriteUseList(VE, std::move(VE.UseListOrders.back()), Stream);
+ VE.UseListOrders.pop_back();
+ }
+ Stream.ExitBlock();
+}
+
/// WriteFunction - Emit a function body to the module stream.
static void WriteFunction(const Function &F, ValueEnumerator &VE,
BitstreamWriter &Stream) {
@@ -1658,11 +1748,12 @@ static void WriteFunction(const Function &F, ValueEnumerator &VE,
} else {
MDNode *Scope, *IA;
DL.getScopeAndInlinedAt(Scope, IA, I->getContext());
+ assert(Scope && "Expected valid scope");
Vals.push_back(DL.getLine());
Vals.push_back(DL.getCol());
- Vals.push_back(Scope ? VE.getValueID(Scope)+1 : 0);
- Vals.push_back(IA ? VE.getValueID(IA)+1 : 0);
+ Vals.push_back(Scope ? VE.getMetadataID(Scope) + 1 : 0);
+ Vals.push_back(IA ? VE.getMetadataID(IA) + 1 : 0);
Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals);
Vals.clear();
@@ -1675,6 +1766,8 @@ static void WriteFunction(const Function &F, ValueEnumerator &VE,
if (NeedsMetadataAttachment)
WriteMetadataAttachment(F, VE, Stream);
+ if (shouldPreserveBitcodeUseListOrder())
+ WriteUseListBlock(&F, VE, Stream);
VE.purgeFunction();
Stream.ExitBlock();
}
@@ -1840,98 +1933,6 @@ static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Stream.ExitBlock();
}
-// Sort the Users based on the order in which the reader parses the bitcode
-// file.
-static bool bitcodereader_order(const User *lhs, const User *rhs) {
- // TODO: Implement.
- return true;
-}
-
-static void WriteUseList(const Value *V, const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
-
- // One or zero uses can't get out of order.
- if (V->use_empty() || V->hasNUses(1))
- return;
-
- // Make a copy of the in-memory use-list for sorting.
- SmallVector<const User*, 8> UserList(V->user_begin(), V->user_end());
-
- // Sort the copy based on the order read by the BitcodeReader.
- std::sort(UserList.begin(), UserList.end(), bitcodereader_order);
-
- // TODO: Generate a diff between the BitcodeWriter in-memory use-list and the
- // sorted list (i.e., the expected BitcodeReader in-memory use-list).
-
- // TODO: Emit the USELIST_CODE_ENTRYs.
-}
-
-static void WriteFunctionUseList(const Function *F, ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- VE.incorporateFunction(*F);
-
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI)
- WriteUseList(AI, VE, Stream);
- for (Function::const_iterator BB = F->begin(), FE = F->end(); BB != FE;
- ++BB) {
- WriteUseList(BB, VE, Stream);
- for (BasicBlock::const_iterator II = BB->begin(), IE = BB->end(); II != IE;
- ++II) {
- WriteUseList(II, VE, Stream);
- for (User::const_op_iterator OI = II->op_begin(), E = II->op_end();
- OI != E; ++OI) {
- if ((isa<Constant>(*OI) && !isa<GlobalValue>(*OI)) ||
- isa<InlineAsm>(*OI))
- WriteUseList(*OI, VE, Stream);
- }
- }
- }
- VE.purgeFunction();
-}
-
-// Emit use-lists.
-static void WriteModuleUseLists(const Module *M, ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3);
-
- // XXX: this modifies the module, but in a way that should never change the
- // behavior of any pass or codegen in LLVM. The problem is that GVs may
- // contain entries in the use_list that do not exist in the Module and are
- // not stored in the .bc file.
- for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
- I != E; ++I)
- I->removeDeadConstantUsers();
-
- // Write the global variables.
- for (Module::const_global_iterator GI = M->global_begin(),
- GE = M->global_end(); GI != GE; ++GI) {
- WriteUseList(GI, VE, Stream);
-
- // Write the global variable initializers.
- if (GI->hasInitializer())
- WriteUseList(GI->getInitializer(), VE, Stream);
- }
-
- // Write the functions.
- for (Module::const_iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI) {
- WriteUseList(FI, VE, Stream);
- if (!FI->isDeclaration())
- WriteFunctionUseList(FI, VE, Stream);
- if (FI->hasPrefixData())
- WriteUseList(FI->getPrefixData(), VE, Stream);
- }
-
- // Write the aliases.
- for (Module::const_alias_iterator AI = M->alias_begin(), AE = M->alias_end();
- AI != AE; ++AI) {
- WriteUseList(AI, VE, Stream);
- WriteUseList(AI->getAliasee(), VE, Stream);
- }
-
- Stream.ExitBlock();
-}
-
/// WriteModule - Emit the specified module to the bitstream.
static void WriteModule(const Module *M, BitstreamWriter &Stream) {
Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
@@ -1942,7 +1943,7 @@ static void WriteModule(const Module *M, BitstreamWriter &Stream) {
Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
// Analyze the module, enumerating globals, functions, etc.
- ValueEnumerator VE(M);
+ ValueEnumerator VE(*M);
// Emit blockinfo, which defines the standard abbreviations etc.
WriteBlockInfo(VE, Stream);
@@ -1974,9 +1975,9 @@ static void WriteModule(const Module *M, BitstreamWriter &Stream) {
// Emit names for globals/functions etc.
WriteValueSymbolTable(M->getValueSymbolTable(), VE, Stream);
- // Emit use-lists.
- if (EnablePreserveUseListOrdering)
- WriteModuleUseLists(M, VE, Stream);
+ // Emit module-level use-lists.
+ if (shouldPreserveBitcodeUseListOrder())
+ WriteUseListBlock(nullptr, VE, Stream);
// Emit function bodies.
for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F)
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
index 4167f6d..25456a4 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
@@ -18,8 +18,8 @@
#include "llvm/Pass.h"
using namespace llvm;
-PreservedAnalyses BitcodeWriterPass::run(Module *M) {
- WriteBitcodeToFile(M, OS);
+PreservedAnalyses BitcodeWriterPass::run(Module &M) {
+ WriteBitcodeToFile(&M, OS);
return PreservedAnalyses::all();
}
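
The pass now follows the new pass manager convention of taking the IR unit by reference. A minimal sketch of invoking it directly, assuming the public interface in llvm/Bitcode/BitcodeWriterPass.h at this revision (a constructor taking a raw_ostream); the run() call itself matches the signature in the hunk above:

  #include "llvm/Bitcode/BitcodeWriterPass.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void runBitcodeWriter(Module &M, raw_ostream &OS) {
    BitcodeWriterPass P(OS);
    P.run(M);   // previously P.run(&M); the PreservedAnalyses result is ignored here
  }
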
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index 15f8034..27a63d8 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -18,31 +18,288 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/UseListOrder.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
+namespace {
+struct OrderMap {
+ DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
+ unsigned LastGlobalConstantID;
+ unsigned LastGlobalValueID;
+
+ OrderMap() : LastGlobalConstantID(0), LastGlobalValueID(0) {}
+
+ bool isGlobalConstant(unsigned ID) const {
+ return ID <= LastGlobalConstantID;
+ }
+ bool isGlobalValue(unsigned ID) const {
+ return ID <= LastGlobalValueID && !isGlobalConstant(ID);
+ }
+
+ unsigned size() const { return IDs.size(); }
+ std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; }
+ std::pair<unsigned, bool> lookup(const Value *V) const {
+ return IDs.lookup(V);
+ }
+ void index(const Value *V) {
+ // Explicitly sequence get-size and insert-value operations to avoid UB.
+ unsigned ID = IDs.size() + 1;
+ IDs[V].first = ID;
+ }
+};
+}
+
+static void orderValue(const Value *V, OrderMap &OM) {
+ if (OM.lookup(V).first)
+ return;
+
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands() && !isa<GlobalValue>(C))
+ for (const Value *Op : C->operands())
+ if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
+ orderValue(Op, OM);
+
+ // Note: we cannot cache this lookup above, since inserting into the map
+ // changes the map's size, and thus affects the other IDs.
+ OM.index(V);
+}
+
+static OrderMap orderModule(const Module &M) {
+ // This needs to match the order used by ValueEnumerator::ValueEnumerator()
+ // and ValueEnumerator::incorporateFunction().
+ OrderMap OM;
+
+ // In the reader, initializers of GlobalValues are set *after* all the
+ // globals have been read. Rather than awkwardly modeling this behaviour
+ // directly in predictValueUseListOrderImpl(), just assign IDs to
+ // initializers of GlobalValues before GlobalValues themselves to model this
+ // implicitly.
+ for (const GlobalVariable &G : M.globals())
+ if (G.hasInitializer())
+ if (!isa<GlobalValue>(G.getInitializer()))
+ orderValue(G.getInitializer(), OM);
+ for (const GlobalAlias &A : M.aliases())
+ if (!isa<GlobalValue>(A.getAliasee()))
+ orderValue(A.getAliasee(), OM);
+ for (const Function &F : M) {
+ if (F.hasPrefixData())
+ if (!isa<GlobalValue>(F.getPrefixData()))
+ orderValue(F.getPrefixData(), OM);
+ if (F.hasPrologueData())
+ if (!isa<GlobalValue>(F.getPrologueData()))
+ orderValue(F.getPrologueData(), OM);
+ }
+ OM.LastGlobalConstantID = OM.size();
+
+ // Initializers of GlobalValues are processed in
+ // BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather
+ // than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
+ // by giving IDs in reverse order.
+ //
+ // Since GlobalValues never reference each other directly (just through
+ // initializers), their relative IDs only matter for determining order of
+ // uses in their initializers.
+ for (const Function &F : M)
+ orderValue(&F, OM);
+ for (const GlobalAlias &A : M.aliases())
+ orderValue(&A, OM);
+ for (const GlobalVariable &G : M.globals())
+ orderValue(&G, OM);
+ OM.LastGlobalValueID = OM.size();
+
+ for (const Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+ // Here we need to match the union of ValueEnumerator::incorporateFunction()
+ // and WriteFunction(). Basic blocks are implicitly declared before
+ // anything else (by declaring their size).
+ for (const BasicBlock &BB : F)
+ orderValue(&BB, OM);
+ for (const Argument &A : F.args())
+ orderValue(&A, OM);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ for (const Value *Op : I.operands())
+ if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
+ isa<InlineAsm>(*Op))
+ orderValue(Op, OM);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ orderValue(&I, OM);
+ }
+ return OM;
+}
+
+static void predictValueUseListOrderImpl(const Value *V, const Function *F,
+ unsigned ID, const OrderMap &OM,
+ UseListOrderStack &Stack) {
+ // Predict use-list order for this one.
+ typedef std::pair<const Use *, unsigned> Entry;
+ SmallVector<Entry, 64> List;
+ for (const Use &U : V->uses())
+ // Check if this user will be serialized.
+ if (OM.lookup(U.getUser()).first)
+ List.push_back(std::make_pair(&U, List.size()));
+
+ if (List.size() < 2)
+ // We may have lost some users.
+ return;
+
+ bool IsGlobalValue = OM.isGlobalValue(ID);
+ std::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+ const Use *LU = L.first;
+ const Use *RU = R.first;
+ if (LU == RU)
+ return false;
+
+ auto LID = OM.lookup(LU->getUser()).first;
+ auto RID = OM.lookup(RU->getUser()).first;
+
+ // Global values are processed in reverse order.
+ //
+ // Moreover, initializers of GlobalValues are set *after* all the globals
+ // have been read (despite having earlier IDs). Rather than awkwardly
+ // modeling this behaviour here, orderModule() has assigned IDs to
+ // initializers of GlobalValues before GlobalValues themselves.
+ if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID))
+ return LID < RID;
+
+ // If ID is 4, then expect: 7 6 5 1 2 3.
+ if (LID < RID) {
+ if (RID <= ID)
+ if (!IsGlobalValue) // GlobalValue uses don't get reversed.
+ return true;
+ return false;
+ }
+ if (RID < LID) {
+ if (LID <= ID)
+ if (!IsGlobalValue) // GlobalValue uses don't get reversed.
+ return false;
+ return true;
+ }
+
+ // LID and RID are equal, so we have different operands of the same user.
+ // Assume operands are added in order for all instructions.
+ if (LID <= ID)
+ if (!IsGlobalValue) // GlobalValue uses don't get reversed.
+ return LU->getOperandNo() < RU->getOperandNo();
+ return LU->getOperandNo() > RU->getOperandNo();
+ });
+
+ if (std::is_sorted(
+ List.begin(), List.end(),
+ [](const Entry &L, const Entry &R) { return L.second < R.second; }))
+ // Order is already correct.
+ return;
+
+ // Store the shuffle.
+ Stack.emplace_back(V, F, List.size());
+ assert(List.size() == Stack.back().Shuffle.size() && "Wrong size");
+ for (size_t I = 0, E = List.size(); I != E; ++I)
+ Stack.back().Shuffle[I] = List[I].second;
+}
+
+static void predictValueUseListOrder(const Value *V, const Function *F,
+ OrderMap &OM, UseListOrderStack &Stack) {
+ auto &IDPair = OM[V];
+ assert(IDPair.first && "Unmapped value");
+ if (IDPair.second)
+ // Already predicted.
+ return;
+
+ // Do the actual prediction.
+ IDPair.second = true;
+ if (!V->use_empty() && std::next(V->use_begin()) != V->use_end())
+ predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
+
+ // Recursive descent into constants.
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands()) // Visit GlobalValues.
+ for (const Value *Op : C->operands())
+ if (isa<Constant>(Op)) // Visit GlobalValues.
+ predictValueUseListOrder(Op, F, OM, Stack);
+}
+
+static UseListOrderStack predictUseListOrder(const Module &M) {
+ OrderMap OM = orderModule(M);
+
+ // Use-list orders need to be serialized after all the users have been added
+ // to a value, or else the shuffles will be incomplete. Store them per
+ // function in a stack.
+ //
+ // Aside from function order, the order of values doesn't matter much here.
+ UseListOrderStack Stack;
+
+ // We want to visit the functions backward now so we can list function-local
+ // constants in the last Function they're used in. Module-level constants
+ // have already been visited above.
+ for (auto I = M.rbegin(), E = M.rend(); I != E; ++I) {
+ const Function &F = *I;
+ if (F.isDeclaration())
+ continue;
+ for (const BasicBlock &BB : F)
+ predictValueUseListOrder(&BB, &F, OM, Stack);
+ for (const Argument &A : F.args())
+ predictValueUseListOrder(&A, &F, OM, Stack);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ for (const Value *Op : I.operands())
+ if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
+ predictValueUseListOrder(Op, &F, OM, Stack);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ predictValueUseListOrder(&I, &F, OM, Stack);
+ }
+
+ // Visit globals last, since the module-level use-list block will be seen
+ // before the function bodies are processed.
+ for (const GlobalVariable &G : M.globals())
+ predictValueUseListOrder(&G, nullptr, OM, Stack);
+ for (const Function &F : M)
+ predictValueUseListOrder(&F, nullptr, OM, Stack);
+ for (const GlobalAlias &A : M.aliases())
+ predictValueUseListOrder(&A, nullptr, OM, Stack);
+ for (const GlobalVariable &G : M.globals())
+ if (G.hasInitializer())
+ predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack);
+ for (const GlobalAlias &A : M.aliases())
+ predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack);
+ for (const Function &F : M) {
+ if (F.hasPrefixData())
+ predictValueUseListOrder(F.getPrefixData(), nullptr, OM, Stack);
+ if (F.hasPrologueData())
+ predictValueUseListOrder(F.getPrologueData(), nullptr, OM, Stack);
+ }
+
+ return Stack;
+}
+
static bool isIntOrIntVectorValue(const std::pair<const Value*, unsigned> &V) {
return V.first->getType()->isIntOrIntVectorTy();
}
-/// ValueEnumerator - Enumerate module-level information.
-ValueEnumerator::ValueEnumerator(const Module *M) {
+ValueEnumerator::ValueEnumerator(const Module &M)
+ : HasMDString(false), HasMDLocation(false) {
+ if (shouldPreserveBitcodeUseListOrder())
+ UseListOrders = predictUseListOrder(M);
+
// Enumerate the global variables.
- for (Module::const_global_iterator I = M->global_begin(),
- E = M->global_end(); I != E; ++I)
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
EnumerateValue(I);
// Enumerate the functions.
- for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
EnumerateValue(I);
EnumerateAttributes(cast<Function>(I)->getAttributes());
}
// Enumerate the aliases.
- for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
+ for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I)
EnumerateValue(I);
@@ -50,41 +307,58 @@ ValueEnumerator::ValueEnumerator(const Module *M) {
unsigned FirstConstant = Values.size();
// Enumerate the global variable initializers.
- for (Module::const_global_iterator I = M->global_begin(),
- E = M->global_end(); I != E; ++I)
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
if (I->hasInitializer())
EnumerateValue(I->getInitializer());
// Enumerate the aliasees.
- for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
+ for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I)
EnumerateValue(I->getAliasee());
// Enumerate the prefix data constants.
- for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I)
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
if (I->hasPrefixData())
EnumerateValue(I->getPrefixData());
+ // Enumerate the prologue data constants.
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
+ if (I->hasPrologueData())
+ EnumerateValue(I->getPrologueData());
+
+ // Enumerate the metadata type.
+ //
+ // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode
+ // only encodes the metadata type when it's used as a value.
+ EnumerateType(Type::getMetadataTy(M.getContext()));
+
// Insert constants and metadata that are named at module level into the slot
// pool so that the module symbol table can refer to them...
- EnumerateValueSymbolTable(M->getValueSymbolTable());
+ EnumerateValueSymbolTable(M.getValueSymbolTable());
EnumerateNamedMetadata(M);
- SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
// Enumerate types used by function bodies and argument lists.
- for (const Function &F : *M) {
+ for (const Function &F : M) {
for (const Argument &A : F.args())
EnumerateType(A.getType());
for (const BasicBlock &BB : F)
for (const Instruction &I : BB) {
for (const Use &Op : I.operands()) {
- if (MDNode *MD = dyn_cast<MDNode>(&Op))
- if (MD->isFunctionLocal() && MD->getFunction())
- // These will get enumerated during function-incorporation.
- continue;
- EnumerateOperandType(Op);
+ auto *MD = dyn_cast<MetadataAsValue>(&Op);
+ if (!MD) {
+ EnumerateOperandType(Op);
+ continue;
+ }
+
+ // Local metadata is enumerated during function-incorporation.
+ if (isa<LocalAsMetadata>(MD->getMetadata()))
+ continue;
+
+ EnumerateMetadata(MD->getMetadata());
}
EnumerateType(I.getType());
if (const CallInst *CI = dyn_cast<CallInst>(&I))
@@ -128,17 +402,20 @@ void ValueEnumerator::setInstructionID(const Instruction *I) {
}
unsigned ValueEnumerator::getValueID(const Value *V) const {
- if (isa<MDNode>(V) || isa<MDString>(V)) {
- ValueMapType::const_iterator I = MDValueMap.find(V);
- assert(I != MDValueMap.end() && "Value not in slotcalculator!");
- return I->second-1;
- }
+ if (auto *MD = dyn_cast<MetadataAsValue>(V))
+ return getMetadataID(MD->getMetadata());
ValueMapType::const_iterator I = ValueMap.find(V);
assert(I != ValueMap.end() && "Value not in slotcalculator!");
return I->second-1;
}
+unsigned ValueEnumerator::getMetadataID(const Metadata *MD) const {
+ auto I = MDValueMap.find(MD);
+ assert(I != MDValueMap.end() && "Metadata not in slotcalculator!");
+ return I->second - 1;
+}
+
void ValueEnumerator::dump() const {
print(dbgs(), ValueMap, "Default");
dbgs() << '\n';
@@ -175,10 +452,27 @@ void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
}
}
+void ValueEnumerator::print(raw_ostream &OS, const MetadataMapType &Map,
+ const char *Name) const {
+
+ OS << "Map Name: " << Name << "\n";
+ OS << "Size: " << Map.size() << "\n";
+ for (auto I = Map.begin(), E = Map.end(); I != E; ++I) {
+ const Metadata *MD = I->first;
+ OS << "Metadata: slot = " << I->second << "\n";
+ MD->print(OS);
+ }
+}
+
/// OptimizeConstants - Reorder constant pool for denser encoding.
void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
if (CstStart == CstEnd || CstStart+1 == CstEnd) return;
+ if (shouldPreserveBitcodeUseListOrder())
+ // Optimizing constants makes the use-list order difficult to predict.
+ // Disable it for now when trying to preserve the order.
+ return;
+
std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
[this](const std::pair<const Value *, unsigned> &LHS,
const std::pair<const Value *, unsigned> &RHS) {
@@ -209,11 +503,12 @@ void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) {
EnumerateValue(VI->getValue());
}
-/// EnumerateNamedMetadata - Insert all of the values referenced by
-/// named metadata in the specified module.
-void ValueEnumerator::EnumerateNamedMetadata(const Module *M) {
- for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
- E = M->named_metadata_end(); I != E; ++I)
+/// Insert all of the values referenced by named metadata in the specified
+/// module.
+void ValueEnumerator::EnumerateNamedMetadata(const Module &M) {
+ for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end();
+ I != E; ++I)
EnumerateNamedMDNode(I);
}
@@ -226,84 +521,62 @@ void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
/// and types referenced by the given MDNode.
void ValueEnumerator::EnumerateMDNodeOperands(const MDNode *N) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (Value *V = N->getOperand(i)) {
- if (isa<MDNode>(V) || isa<MDString>(V))
- EnumerateMetadata(V);
- else if (!isa<Instruction>(V) && !isa<Argument>(V))
- EnumerateValue(V);
- } else
- EnumerateType(Type::getVoidTy(N->getContext()));
+ Metadata *MD = N->getOperand(i);
+ if (!MD)
+ continue;
+ assert(!isa<LocalAsMetadata>(MD) && "MDNodes cannot be function-local");
+ EnumerateMetadata(MD);
}
}
-void ValueEnumerator::EnumerateMetadata(const Value *MD) {
- assert((isa<MDNode>(MD) || isa<MDString>(MD)) && "Invalid metadata kind");
+void ValueEnumerator::EnumerateMetadata(const Metadata *MD) {
+ assert(
+ (isa<MDNode>(MD) || isa<MDString>(MD) || isa<ConstantAsMetadata>(MD)) &&
+ "Invalid metadata kind");
- // Enumerate the type of this value.
- EnumerateType(MD->getType());
-
- const MDNode *N = dyn_cast<MDNode>(MD);
+ // Insert a dummy ID to block the co-recursive call to
+ // EnumerateMDNodeOperands() from re-visiting MD in a cyclic graph.
+ //
+ // Return early if there's already an ID.
+ if (!MDValueMap.insert(std::make_pair(MD, 0)).second)
+ return;
- // In the module-level pass, skip function-local nodes themselves, but
- // do walk their operands.
- if (N && N->isFunctionLocal() && N->getFunction()) {
+ // Visit operands first to minimize RAUW.
+ if (auto *N = dyn_cast<MDNode>(MD))
EnumerateMDNodeOperands(N);
- return;
- }
+ else if (auto *C = dyn_cast<ConstantAsMetadata>(MD))
+ EnumerateValue(C->getValue());
- // Check to see if it's already in!
- unsigned &MDValueID = MDValueMap[MD];
- if (MDValueID) {
- // Increment use count.
- MDValues[MDValueID-1].second++;
- return;
- }
- MDValues.push_back(std::make_pair(MD, 1U));
- MDValueID = MDValues.size();
+ HasMDString |= isa<MDString>(MD);
+ HasMDLocation |= isa<MDLocation>(MD);
- // Enumerate all non-function-local operands.
- if (N)
- EnumerateMDNodeOperands(N);
+ // Replace the dummy ID inserted above with the correct one. MDValueMap may
+ // have changed by inserting operands, so we need a fresh lookup here.
+ MDs.push_back(MD);
+ MDValueMap[MD] = MDs.size();
}
/// EnumerateFunctionLocalMetadataa - Incorporate function-local metadata
-/// information reachable from the given MDNode.
-void ValueEnumerator::EnumerateFunctionLocalMetadata(const MDNode *N) {
- assert(N->isFunctionLocal() && N->getFunction() &&
- "EnumerateFunctionLocalMetadata called on non-function-local mdnode!");
-
- // Enumerate the type of this value.
- EnumerateType(N->getType());
-
+/// information reachable from the metadata.
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+ const LocalAsMetadata *Local) {
// Check to see if it's already in!
- unsigned &MDValueID = MDValueMap[N];
- if (MDValueID) {
- // Increment use count.
- MDValues[MDValueID-1].second++;
+ unsigned &MDValueID = MDValueMap[Local];
+ if (MDValueID)
return;
- }
- MDValues.push_back(std::make_pair(N, 1U));
- MDValueID = MDValues.size();
-
- // To incoroporate function-local information visit all function-local
- // MDNodes and all function-local values they reference.
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (Value *V = N->getOperand(i)) {
- if (MDNode *O = dyn_cast<MDNode>(V)) {
- if (O->isFunctionLocal() && O->getFunction())
- EnumerateFunctionLocalMetadata(O);
- } else if (isa<Instruction>(V) || isa<Argument>(V))
- EnumerateValue(V);
- }
- // Also, collect all function-local MDNodes for easy access.
- FunctionLocalMDs.push_back(N);
+ MDs.push_back(Local);
+ MDValueID = MDs.size();
+
+ EnumerateValue(Local->getValue());
+
+ // Also, collect all function-local metadata for easy access.
+ FunctionLocalMDs.push_back(Local);
}
void ValueEnumerator::EnumerateValue(const Value *V) {
assert(!V->getType()->isVoidTy() && "Can't insert void values!");
- assert(!isa<MDNode>(V) && !isa<MDString>(V) &&
- "EnumerateValue doesn't handle Metadata!");
+ assert(!isa<MetadataAsValue>(V) && "EnumerateValue doesn't handle Metadata!");
// Check to see if it's already in!
unsigned &ValueID = ValueMap[V];
@@ -367,9 +640,8 @@ void ValueEnumerator::EnumerateType(Type *Ty) {
// Enumerate all of the subtypes before we enumerate this type. This ensures
// that the type will be enumerated in an order that can be directly built.
- for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
- I != E; ++I)
- EnumerateType(*I);
+ for (Type *SubTy : Ty->subtypes())
+ EnumerateType(SubTy);
// Refresh the TypeID pointer in case the table rehashed.
TypeID = &TypeMap[Ty];
@@ -393,30 +665,35 @@ void ValueEnumerator::EnumerateType(Type *Ty) {
void ValueEnumerator::EnumerateOperandType(const Value *V) {
EnumerateType(V->getType());
- if (const Constant *C = dyn_cast<Constant>(V)) {
- // If this constant is already enumerated, ignore it, we know its type must
- // be enumerated.
- if (ValueMap.count(V)) return;
+ if (auto *MD = dyn_cast<MetadataAsValue>(V)) {
+ assert(!isa<LocalAsMetadata>(MD->getMetadata()) &&
+ "Function-local metadata should be left for later");
- // This constant may have operands, make sure to enumerate the types in
- // them.
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
- const Value *Op = C->getOperand(i);
+ EnumerateMetadata(MD->getMetadata());
+ return;
+ }
- // Don't enumerate basic blocks here, this happens as operands to
- // blockaddress.
- if (isa<BasicBlock>(Op)) continue;
+ const Constant *C = dyn_cast<Constant>(V);
+ if (!C)
+ return;
- EnumerateOperandType(Op);
- }
+ // If this constant is already enumerated, ignore it, we know its type must
+ // be enumerated.
+ if (ValueMap.count(C))
+ return;
- if (const MDNode *N = dyn_cast<MDNode>(V)) {
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (Value *Elem = N->getOperand(i))
- EnumerateOperandType(Elem);
- }
- } else if (isa<MDString>(V) || isa<MDNode>(V))
- EnumerateMetadata(V);
+ // This constant may have operands, make sure to enumerate the types in
+ // them.
+ for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
+ const Value *Op = C->getOperand(i);
+
+ // Don't enumerate basic blocks here, this happens as operands to
+ // blockaddress.
+ if (isa<BasicBlock>(Op))
+ continue;
+
+ EnumerateOperandType(Op);
+ }
}
void ValueEnumerator::EnumerateAttributes(AttributeSet PAL) {
@@ -444,7 +721,7 @@ void ValueEnumerator::EnumerateAttributes(AttributeSet PAL) {
void ValueEnumerator::incorporateFunction(const Function &F) {
InstructionCount = 0;
NumModuleValues = Values.size();
- NumModuleMDValues = MDValues.size();
+ NumModuleMDs = MDs.size();
// Adding function arguments to the value table.
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
@@ -475,24 +752,16 @@ void ValueEnumerator::incorporateFunction(const Function &F) {
FirstInstID = Values.size();
- SmallVector<MDNode *, 8> FnLocalMDVector;
+ SmallVector<LocalAsMetadata *, 8> FnLocalMDVector;
// Add all of the instructions.
for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) {
for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
OI != E; ++OI) {
- if (MDNode *MD = dyn_cast<MDNode>(*OI))
- if (MD->isFunctionLocal() && MD->getFunction())
+ if (auto *MD = dyn_cast<MetadataAsValue>(&*OI))
+ if (auto *Local = dyn_cast<LocalAsMetadata>(MD->getMetadata()))
// Enumerate metadata after the instructions they might refer to.
- FnLocalMDVector.push_back(MD);
- }
-
- SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
- I->getAllMetadataOtherThanDebugLoc(MDs);
- for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
- MDNode *N = MDs[i].second;
- if (N->isFunctionLocal() && N->getFunction())
- FnLocalMDVector.push_back(N);
+ FnLocalMDVector.push_back(Local);
}
if (!I->getType()->isVoidTy())
@@ -509,13 +778,13 @@ void ValueEnumerator::purgeFunction() {
/// Remove purged values from the ValueMap.
for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
ValueMap.erase(Values[i].first);
- for (unsigned i = NumModuleMDValues, e = MDValues.size(); i != e; ++i)
- MDValueMap.erase(MDValues[i].first);
+ for (unsigned i = NumModuleMDs, e = MDs.size(); i != e; ++i)
+ MDValueMap.erase(MDs[i]);
for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
ValueMap.erase(BasicBlocks[i]);
Values.resize(NumModuleValues);
- MDValues.resize(NumModuleMDValues);
+ MDs.resize(NumModuleMDs);
BasicBlocks.clear();
FunctionLocalMDs.clear();
}
@@ -538,4 +807,3 @@ unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs);
return getGlobalBasicBlockID(BB);
}
-
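
The subtlest part of the new enumerator code is the comparator in predictValueUseListOrderImpl(), summarized by its "If ID is 4, then expect: 7 6 5 1 2 3" comment: users whose IDs come after the value being ordered (forward references) are listed first in reverse, followed by earlier users in forward order. A standalone illustration of that ordering rule for the non-GlobalValue path (hypothetical IDs only, not LLVM API):

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  int main() {
    const unsigned ID = 4;                             // ID of the value being ordered
    std::vector<unsigned> Users = {1, 2, 3, 5, 6, 7};  // IDs of its users
    // Same rule as the comparator above, restricted to distinct user IDs.
    std::sort(Users.begin(), Users.end(), [ID](unsigned L, unsigned R) {
      if (L < R)
        return R <= ID;   // two "early" users keep their forward order
      if (R < L)
        return L > ID;    // "late" users (forward references) sort first, reversed
      return false;
    });
    for (unsigned U : Users)
      std::printf("%u ", U);                           // prints: 7 6 5 1 2 3
    std::printf("\n");
    return 0;
  }
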
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
index 1c9f38e..d363c1b 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#ifndef VALUE_ENUMERATOR_H
-#define VALUE_ENUMERATOR_H
+#ifndef LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
+#define LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/UseListOrder.h"
#include <vector>
namespace llvm {
@@ -29,6 +30,8 @@ class BasicBlock;
class Comdat;
class Function;
class Module;
+class Metadata;
+class LocalAsMetadata;
class MDNode;
class NamedMDNode;
class AttributeSet;
@@ -42,6 +45,9 @@ public:
// For each value, we remember its Value* and occurrence frequency.
typedef std::vector<std::pair<const Value*, unsigned> > ValueList;
+
+ UseListOrderStack UseListOrders;
+
private:
typedef DenseMap<Type*, unsigned> TypeMapType;
TypeMapType TypeMap;
@@ -54,9 +60,12 @@ private:
typedef UniqueVector<const Comdat *> ComdatSetType;
ComdatSetType Comdats;
- ValueList MDValues;
- SmallVector<const MDNode *, 8> FunctionLocalMDs;
- ValueMapType MDValueMap;
+ std::vector<const Metadata *> MDs;
+ SmallVector<const LocalAsMetadata *, 8> FunctionLocalMDs;
+ typedef DenseMap<const Metadata *, unsigned> MetadataMapType;
+ MetadataMapType MDValueMap;
+ bool HasMDString;
+ bool HasMDLocation;
typedef DenseMap<AttributeSet, unsigned> AttributeGroupMapType;
AttributeGroupMapType AttributeGroupMap;
@@ -84,7 +93,7 @@ private:
/// When a function is incorporated, this is the size of the MDValues list
/// before incorporation.
- unsigned NumModuleMDValues;
+ unsigned NumModuleMDs;
unsigned FirstFuncConstantID;
unsigned FirstInstID;
@@ -92,12 +101,18 @@ private:
ValueEnumerator(const ValueEnumerator &) LLVM_DELETED_FUNCTION;
void operator=(const ValueEnumerator &) LLVM_DELETED_FUNCTION;
public:
- ValueEnumerator(const Module *M);
+ ValueEnumerator(const Module &M);
void dump() const;
void print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const;
+ void print(raw_ostream &OS, const MetadataMapType &Map,
+ const char *Name) const;
unsigned getValueID(const Value *V) const;
+ unsigned getMetadataID(const Metadata *V) const;
+
+ bool hasMDString() const { return HasMDString; }
+ bool hasMDLocation() const { return HasMDLocation; }
unsigned getTypeID(Type *T) const {
TypeMapType::const_iterator I = TypeMap.find(T);
@@ -130,8 +145,8 @@ public:
}
const ValueList &getValues() const { return Values; }
- const ValueList &getMDValues() const { return MDValues; }
- const SmallVectorImpl<const MDNode *> &getFunctionLocalMDValues() const {
+ const std::vector<const Metadata *> &getMDs() const { return MDs; }
+ const SmallVectorImpl<const LocalAsMetadata *> &getFunctionLocalMDs() const {
return FunctionLocalMDs;
}
const TypeList &getTypes() const { return Types; }
@@ -163,8 +178,8 @@ private:
void OptimizeConstants(unsigned CstStart, unsigned CstEnd);
void EnumerateMDNodeOperands(const MDNode *N);
- void EnumerateMetadata(const Value *MD);
- void EnumerateFunctionLocalMetadata(const MDNode *N);
+ void EnumerateMetadata(const Metadata *MD);
+ void EnumerateFunctionLocalMetadata(const LocalAsMetadata *Local);
void EnumerateNamedMDNode(const NamedMDNode *NMD);
void EnumerateValue(const Value *V);
void EnumerateType(Type *T);
@@ -172,7 +187,7 @@ private:
void EnumerateAttributes(AttributeSet PAL);
void EnumerateValueSymbolTable(const ValueSymbolTable &ST);
- void EnumerateNamedMetadata(const Module *M);
+ void EnumerateNamedMetadata(const Module &M);
};
} // End llvm namespace
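
The header now keeps Value IDs and Metadata IDs in separate numbering spaces. A minimal sketch of the updated interface as used inside lib/Bitcode/Writer (ValueEnumerator.h is an internal header, so this only compiles within that directory, and both arguments are assumed to have been enumerated from the module):

  #include "ValueEnumerator.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  static void queryIDs(const Module &M, const Value *V, const Metadata *MD) {
    ValueEnumerator VE(M);                   // constructor now takes Module&
    unsigned ValID = VE.getValueID(V);       // Value numbering, as before
    unsigned MDID = VE.getMetadataID(MD);    // new, separate Metadata numbering
    (void)ValID;
    (void)MDID;
  }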