Diffstat (limited to 'lib/VMCore')
29 files changed, 1704 insertions, 446 deletions
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp index 7ef1131..b72c17f 100644 --- a/lib/VMCore/AsmWriter.cpp +++ b/lib/VMCore/AsmWriter.cpp @@ -66,6 +66,25 @@ static const Module *getModuleFromVal(const Value *V) { return 0; } +static void PrintCallingConv(unsigned cc, raw_ostream &Out) +{ + switch (cc) { + case CallingConv::Fast: Out << "fastcc"; break; + case CallingConv::Cold: Out << "coldcc"; break; + case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break; + case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break; + case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break; + case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break; + case CallingConv::ARM_APCS: Out << "arm_apcscc"; break; + case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break; + case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc"; break; + case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break; + case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break; + case CallingConv::PTX_Device: Out << "ptx_device"; break; + default: Out << "cc" << cc; break; + } +} + // PrintEscapedString - Print each character of the specified string, escaping // it if it is not printable or if it is an escape char. static void PrintEscapedString(StringRef Name, raw_ostream &Out) { @@ -141,8 +160,8 @@ static void PrintLLVMName(raw_ostream &OS, const Value *V) { /// TypePrinting - Type printing machinery. namespace { class TypePrinting { - TypePrinting(const TypePrinting &); // DO NOT IMPLEMENT - void operator=(const TypePrinting&); // DO NOT IMPLEMENT + TypePrinting(const TypePrinting &) LLVM_DELETED_FUNCTION; + void operator=(const TypePrinting&) LLVM_DELETED_FUNCTION; public: /// NamedTypes - The named types that are used by the current module. @@ -380,8 +399,8 @@ private: /// Add all of the functions arguments, basic blocks, and instructions. void processFunction(); - SlotTracker(const SlotTracker &); // DO NOT IMPLEMENT - void operator=(const SlotTracker &); // DO NOT IMPLEMENT + SlotTracker(const SlotTracker &) LLVM_DELETED_FUNCTION; + void operator=(const SlotTracker &) LLVM_DELETED_FUNCTION; }; } // end anonymous namespace @@ -1029,6 +1048,9 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, Out << "sideeffect "; if (IA->isAlignStack()) Out << "alignstack "; + // We don't emit the AD_ATT dialect as it's the assumed default. + if (IA->getDialect() == InlineAsm::AD_Intel) + Out << "inteldialect "; Out << '"'; PrintEscapedString(IA->getAsmString(), Out); Out << "\", \""; @@ -1222,8 +1244,8 @@ void AssemblyWriter::writeParamOperand(const Value *Operand, // Print the type TypePrinter.print(Operand->getType(), Out); // Print parameter attributes list - if (Attrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs); + if (Attrs.hasAttributes()) + Out << ' ' << Attrs.getAsString(); Out << ' '; // Print the operand WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule); @@ -1285,8 +1307,9 @@ void AssemblyWriter::printModule(const Module *M) { // Output all globals. if (!M->global_empty()) Out << '\n'; for (Module::const_global_iterator I = M->global_begin(), E = M->global_end(); - I != E; ++I) - printGlobal(I); + I != E; ++I) { + printGlobal(I); Out << '\n'; + } // Output all aliases. 
if (!M->alias_empty()) Out << "\n"; @@ -1353,12 +1376,12 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT, case GlobalValue::LinkerPrivateWeakLinkage: Out << "linker_private_weak "; break; - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: - Out << "linker_private_weak_def_auto "; - break; case GlobalValue::InternalLinkage: Out << "internal "; break; case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break; case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break; + case GlobalValue::LinkOnceODRAutoHideLinkage: + Out << "linkonce_odr_auto_hide "; + break; case GlobalValue::WeakAnyLinkage: Out << "weak "; break; case GlobalValue::WeakODRLinkage: Out << "weak_odr "; break; case GlobalValue::CommonLinkage: Out << "common "; break; @@ -1436,7 +1459,6 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) { Out << ", align " << GV->getAlignment(); printInfoComment(*GV); - Out << '\n'; } void AssemblyWriter::printAlias(const GlobalAlias *GA) { @@ -1527,27 +1549,16 @@ void AssemblyWriter::printFunction(const Function *F) { PrintVisibility(F->getVisibility(), Out); // Print the calling convention. - switch (F->getCallingConv()) { - case CallingConv::C: break; // default - case CallingConv::Fast: Out << "fastcc "; break; - case CallingConv::Cold: Out << "coldcc "; break; - case CallingConv::X86_StdCall: Out << "x86_stdcallcc "; break; - case CallingConv::X86_FastCall: Out << "x86_fastcallcc "; break; - case CallingConv::X86_ThisCall: Out << "x86_thiscallcc "; break; - case CallingConv::ARM_APCS: Out << "arm_apcscc "; break; - case CallingConv::ARM_AAPCS: Out << "arm_aapcscc "; break; - case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc "; break; - case CallingConv::MSP430_INTR: Out << "msp430_intrcc "; break; - case CallingConv::PTX_Kernel: Out << "ptx_kernel "; break; - case CallingConv::PTX_Device: Out << "ptx_device "; break; - default: Out << "cc" << F->getCallingConv() << " "; break; + if (F->getCallingConv() != CallingConv::C) { + PrintCallingConv(F->getCallingConv(), Out); + Out << " "; } FunctionType *FT = F->getFunctionType(); const AttrListPtr &Attrs = F->getAttributes(); Attributes RetAttrs = Attrs.getRetAttributes(); - if (RetAttrs != Attribute::None) - Out << Attribute::getAsString(Attrs.getRetAttributes()) << ' '; + if (RetAttrs.hasAttributes()) + Out << Attrs.getRetAttributes().getAsString() << ' '; TypePrinter.print(F->getReturnType(), Out); Out << ' '; WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent()); @@ -1576,8 +1587,8 @@ void AssemblyWriter::printFunction(const Function *F) { TypePrinter.print(FT->getParamType(i), Out); Attributes ArgAttrs = Attrs.getParamAttributes(i+1); - if (ArgAttrs != Attribute::None) - Out << ' ' << Attribute::getAsString(ArgAttrs); + if (ArgAttrs.hasAttributes()) + Out << ' ' << ArgAttrs.getAsString(); } } @@ -1590,8 +1601,8 @@ void AssemblyWriter::printFunction(const Function *F) { if (F->hasUnnamedAddr()) Out << " unnamed_addr"; Attributes FnAttrs = Attrs.getFnAttributes(); - if (FnAttrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs.getFnAttributes()); + if (FnAttrs.hasAttributes()) + Out << ' ' << Attrs.getFnAttributes().getAsString(); if (F->hasSection()) { Out << " section \""; PrintEscapedString(F->getSection(), Out); @@ -1624,8 +1635,8 @@ void AssemblyWriter::printArgument(const Argument *Arg, TypePrinter.print(Arg->getType(), Out); // Output parameter attributes list - if (Attrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs); + if 
(Attrs.hasAttributes()) + Out << ' ' << Attrs.getAsString(); // Output name, if available... if (Arg->hasName()) { @@ -1828,20 +1839,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " void"; } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) { // Print the calling convention being used. - switch (CI->getCallingConv()) { - case CallingConv::C: break; // default - case CallingConv::Fast: Out << " fastcc"; break; - case CallingConv::Cold: Out << " coldcc"; break; - case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break; - case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break; - case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break; - case CallingConv::ARM_APCS: Out << " arm_apcscc "; break; - case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break; - case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break; - case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break; - case CallingConv::PTX_Kernel: Out << " ptx_kernel"; break; - case CallingConv::PTX_Device: Out << " ptx_device"; break; - default: Out << " cc" << CI->getCallingConv(); break; + if (CI->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(CI->getCallingConv(), Out); } Operand = CI->getCalledValue(); @@ -1850,8 +1850,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Type *RetTy = FTy->getReturnType(); const AttrListPtr &PAL = CI->getAttributes(); - if (PAL.getRetAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getRetAttributes()); + if (PAL.getRetAttributes().hasAttributes()) + Out << ' ' << PAL.getRetAttributes().getAsString(); // If possible, print out the short form of the call instruction. We can // only do this if the first argument is a pointer to a nonvararg function, @@ -1874,8 +1874,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1)); } Out << ')'; - if (PAL.getFnAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getFnAttributes()); + if (PAL.getFnAttributes().hasAttributes()) + Out << ' ' << PAL.getFnAttributes().getAsString(); } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) { Operand = II->getCalledValue(); PointerType *PTy = cast<PointerType>(Operand->getType()); @@ -1884,24 +1884,13 @@ void AssemblyWriter::printInstruction(const Instruction &I) { const AttrListPtr &PAL = II->getAttributes(); // Print the calling convention being used. 
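// Illustrative only (a sketch, not part of this patch): with the shared
// PrintCallingConv helper, definitions, calls and invokes all print the same
// mnemonics, e.g.
//   define fastcc i32 @f(i32 inreg %x)
//   %r = call x86_stdcallcc i32 @g(i32 %x)
//   invoke coldcc void @h() to label %ok unwind label %err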
- switch (II->getCallingConv()) { - case CallingConv::C: break; // default - case CallingConv::Fast: Out << " fastcc"; break; - case CallingConv::Cold: Out << " coldcc"; break; - case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break; - case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break; - case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break; - case CallingConv::ARM_APCS: Out << " arm_apcscc "; break; - case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break; - case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break; - case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break; - case CallingConv::PTX_Kernel: Out << " ptx_kernel"; break; - case CallingConv::PTX_Device: Out << " ptx_device"; break; - default: Out << " cc" << II->getCallingConv(); break; + if (II->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(II->getCallingConv(), Out); } - if (PAL.getRetAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getRetAttributes()); + if (PAL.getRetAttributes().hasAttributes()) + Out << ' ' << PAL.getRetAttributes().getAsString(); // If possible, print out the short form of the invoke instruction. We can // only do this if the first argument is a pointer to a nonvararg function, @@ -1925,8 +1914,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } Out << ')'; - if (PAL.getFnAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getFnAttributes()); + if (PAL.getFnAttributes().hasAttributes()) + Out << ' ' << PAL.getFnAttributes().getAsString(); Out << "\n to "; writeOperand(II->getNormalDest(), true); diff --git a/lib/VMCore/Attributes.cpp b/lib/VMCore/Attributes.cpp index c8219eb..f1268e6 100644 --- a/lib/VMCore/Attributes.cpp +++ b/lib/VMCore/Attributes.cpp @@ -7,11 +7,14 @@ // //===----------------------------------------------------------------------===// // -// This file implements the AttributesList class and Attribute utilities. +// This file implements the Attributes, AttributeImpl, AttrBuilder, +// AttributeListImpl, and AttrListPtr classes. // //===----------------------------------------------------------------------===// #include "llvm/Attributes.h" +#include "AttributesImpl.h" +#include "LLVMContextImpl.h" #include "llvm/Type.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/FoldingSet.h" @@ -23,215 +26,382 @@ using namespace llvm; //===----------------------------------------------------------------------===// -// Attribute Function Definitions +// Attributes Implementation //===----------------------------------------------------------------------===// -std::string Attribute::getAsString(Attributes Attrs) { +Attributes Attributes::get(LLVMContext &Context, ArrayRef<AttrVal> Vals) { + AttrBuilder B; + for (ArrayRef<AttrVal>::iterator I = Vals.begin(), E = Vals.end(); + I != E; ++I) + B.addAttribute(*I); + return Attributes::get(Context, B); +} + +Attributes Attributes::get(LLVMContext &Context, AttrBuilder &B) { + // If there are no attributes, return an empty Attributes class. + if (!B.hasAttributes()) + return Attributes(); + + // Otherwise, build a key to look up the existing attributes. + LLVMContextImpl *pImpl = Context.pImpl; + FoldingSetNodeID ID; + ID.AddInteger(B.Raw()); + + void *InsertPoint; + AttributesImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint); + + if (!PA) { + // If we didn't find any existing attributes of the same shape then create a + // new one and insert it. 
+ PA = new AttributesImpl(B.Raw()); + pImpl->AttrsSet.InsertNode(PA, InsertPoint); + } + + // Return the AttributesList that we found or created. + return Attributes(PA); +} + +bool Attributes::hasAttribute(AttrVal Val) const { + return Attrs && Attrs->hasAttribute(Val); +} + +bool Attributes::hasAttributes() const { + return Attrs && Attrs->hasAttributes(); +} + +bool Attributes::hasAttributes(const Attributes &A) const { + return Attrs && Attrs->hasAttributes(A); +} + +/// This returns the alignment field of an attribute as a byte alignment value. +unsigned Attributes::getAlignment() const { + if (!hasAttribute(Attributes::Alignment)) + return 0; + return 1U << ((Attrs->getAlignment() >> 16) - 1); +} + +/// This returns the stack alignment field of an attribute as a byte alignment +/// value. +unsigned Attributes::getStackAlignment() const { + if (!hasAttribute(Attributes::StackAlignment)) + return 0; + return 1U << ((Attrs->getStackAlignment() >> 26) - 1); +} + +uint64_t Attributes::Raw() const { + return Attrs ? Attrs->Raw() : 0; +} + +Attributes Attributes::typeIncompatible(Type *Ty) { + AttrBuilder Incompatible; + + if (!Ty->isIntegerTy()) + // Attributes that only apply to integers. + Incompatible.addAttribute(Attributes::SExt) + .addAttribute(Attributes::ZExt); + + if (!Ty->isPointerTy()) + // Attributes that only apply to pointers. + Incompatible.addAttribute(Attributes::ByVal) + .addAttribute(Attributes::Nest) + .addAttribute(Attributes::NoAlias) + .addAttribute(Attributes::NoCapture) + .addAttribute(Attributes::StructRet); + + return Attributes::get(Ty->getContext(), Incompatible); +} + +/// encodeLLVMAttributesForBitcode - This returns an integer containing an +/// encoding of all the LLVM attributes found in the given attribute bitset. +/// Any change to this encoding is a breaking change to bitcode compatibility. +uint64_t Attributes::encodeLLVMAttributesForBitcode(Attributes Attrs) { + // FIXME: It doesn't make sense to store the alignment information as an + // expanded out value, we should store it as a log2 value. However, we can't + // just change that here without breaking bitcode compatibility. If this ever + // becomes a problem in practice, we should introduce new tag numbers in the + // bitcode file and have those tags use a more efficiently encoded alignment + // field. + + // Store the alignment in the bitcode as a 16-bit raw value instead of a 5-bit + // log2 encoded value. Shift the bits above the alignment up by 11 bits. + uint64_t EncodedAttrs = Attrs.Raw() & 0xffff; + if (Attrs.hasAttribute(Attributes::Alignment)) + EncodedAttrs |= Attrs.getAlignment() << 16; + EncodedAttrs |= (Attrs.Raw() & (0xffffULL << 21)) << 11; + return EncodedAttrs; +} + +/// decodeLLVMAttributesForBitcode - This returns an attribute bitset containing +/// the LLVM attributes that have been decoded from the given integer. This +/// function must stay in sync with 'encodeLLVMAttributesForBitcode'. +Attributes Attributes::decodeLLVMAttributesForBitcode(LLVMContext &C, + uint64_t EncodedAttrs) { + // The alignment is stored as a 16-bit raw value from bits 31--16. We shift + // the bits above 31 down by 11 bits. 
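// Illustrative worked example, assuming the bit positions given by
// AttributesImpl::getAttrMask below: an attribute word carrying ZExt (bit 0),
// NoCapture (bit 21) and 8-byte alignment (raw field 4 << 16) encodes as
//   low 16 bits    = 0x1                (ZExt)
//   bits 16..31    = 8 << 16            (the byte alignment, not its log2)
//   bits 32 and up = (1 << 21) << 11    (NoCapture, shifted up by 11)
// and decoding reads the byte alignment back from bits 16..31, keeps the low
// 16 bits as-is, and shifts the high bits down by 11 so NoCapture lands on
// bit 21 again.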
+ unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16; + assert((!Alignment || isPowerOf2_32(Alignment)) && + "Alignment must be a power of two."); + + AttrBuilder B(EncodedAttrs & 0xffff); + if (Alignment) + B.addAlignmentAttr(Alignment); + B.addRawValue((EncodedAttrs & (0xffffULL << 32)) >> 11); + return Attributes::get(C, B); +} + +std::string Attributes::getAsString() const { std::string Result; - if (Attrs & Attribute::ZExt) + if (hasAttribute(Attributes::ZExt)) Result += "zeroext "; - if (Attrs & Attribute::SExt) + if (hasAttribute(Attributes::SExt)) Result += "signext "; - if (Attrs & Attribute::NoReturn) + if (hasAttribute(Attributes::NoReturn)) Result += "noreturn "; - if (Attrs & Attribute::NoUnwind) + if (hasAttribute(Attributes::NoUnwind)) Result += "nounwind "; - if (Attrs & Attribute::UWTable) + if (hasAttribute(Attributes::UWTable)) Result += "uwtable "; - if (Attrs & Attribute::ReturnsTwice) + if (hasAttribute(Attributes::ReturnsTwice)) Result += "returns_twice "; - if (Attrs & Attribute::InReg) + if (hasAttribute(Attributes::InReg)) Result += "inreg "; - if (Attrs & Attribute::NoAlias) + if (hasAttribute(Attributes::NoAlias)) Result += "noalias "; - if (Attrs & Attribute::NoCapture) + if (hasAttribute(Attributes::NoCapture)) Result += "nocapture "; - if (Attrs & Attribute::StructRet) + if (hasAttribute(Attributes::StructRet)) Result += "sret "; - if (Attrs & Attribute::ByVal) + if (hasAttribute(Attributes::ByVal)) Result += "byval "; - if (Attrs & Attribute::Nest) + if (hasAttribute(Attributes::Nest)) Result += "nest "; - if (Attrs & Attribute::ReadNone) + if (hasAttribute(Attributes::ReadNone)) Result += "readnone "; - if (Attrs & Attribute::ReadOnly) + if (hasAttribute(Attributes::ReadOnly)) Result += "readonly "; - if (Attrs & Attribute::OptimizeForSize) + if (hasAttribute(Attributes::OptimizeForSize)) Result += "optsize "; - if (Attrs & Attribute::NoInline) + if (hasAttribute(Attributes::NoInline)) Result += "noinline "; - if (Attrs & Attribute::InlineHint) + if (hasAttribute(Attributes::InlineHint)) Result += "inlinehint "; - if (Attrs & Attribute::AlwaysInline) + if (hasAttribute(Attributes::AlwaysInline)) Result += "alwaysinline "; - if (Attrs & Attribute::StackProtect) + if (hasAttribute(Attributes::StackProtect)) Result += "ssp "; - if (Attrs & Attribute::StackProtectReq) + if (hasAttribute(Attributes::StackProtectReq)) Result += "sspreq "; - if (Attrs & Attribute::NoRedZone) + if (hasAttribute(Attributes::NoRedZone)) Result += "noredzone "; - if (Attrs & Attribute::NoImplicitFloat) + if (hasAttribute(Attributes::NoImplicitFloat)) Result += "noimplicitfloat "; - if (Attrs & Attribute::Naked) + if (hasAttribute(Attributes::Naked)) Result += "naked "; - if (Attrs & Attribute::NonLazyBind) + if (hasAttribute(Attributes::NonLazyBind)) Result += "nonlazybind "; - if (Attrs & Attribute::AddressSafety) + if (hasAttribute(Attributes::AddressSafety)) Result += "address_safety "; - if (Attrs & Attribute::StackAlignment) { + if (hasAttribute(Attributes::MinSize)) + Result += "minsize "; + if (hasAttribute(Attributes::StackAlignment)) { Result += "alignstack("; - Result += utostr(Attribute::getStackAlignmentFromAttrs(Attrs)); + Result += utostr(getStackAlignment()); Result += ") "; } - if (Attrs & Attribute::Alignment) { + if (hasAttribute(Attributes::Alignment)) { Result += "align "; - Result += utostr(Attribute::getAlignmentFromAttrs(Attrs)); + Result += utostr(getAlignment()); Result += " "; } - if (Attrs & Attribute::IANSDialect) - Result += "ia_nsdialect "; - 
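The new interface threads an LLVMContext through attribute construction and uniques the underlying AttributesImpl in the context's FoldingSet. A minimal sketch of how a client builds a uniqued Attributes value with the AttrBuilder API added in this patch (Context is a placeholder for whatever LLVMContext the caller already has; uses llvm/Attributes.h):

AttrBuilder B;
B.addAttribute(Attributes::NoUnwind)
 .addAttribute(Attributes::ReadOnly)
 .addAlignmentAttr(16);                      // byte alignment, power of two
Attributes Attrs = Attributes::get(Context, B);  // Context: placeholder name
assert(Attrs.hasAttribute(Attributes::ReadOnly));
assert(Attrs.getAlignment() == 16);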
// Trim the trailing space. assert(!Result.empty() && "Unknown attribute!"); Result.erase(Result.end()-1); return Result; } -Attributes Attribute::typeIncompatible(Type *Ty) { - Attributes Incompatible = None; - - if (!Ty->isIntegerTy()) - // Attributes that only apply to integers. - Incompatible |= SExt | ZExt; - - if (!Ty->isPointerTy()) - // Attributes that only apply to pointers. - Incompatible |= ByVal | Nest | NoAlias | StructRet | NoCapture; - - return Incompatible; +//===----------------------------------------------------------------------===// +// AttrBuilder Implementation +//===----------------------------------------------------------------------===// + +AttrBuilder &AttrBuilder::addAttribute(Attributes::AttrVal Val){ + Bits |= AttributesImpl::getAttrMask(Val); + return *this; +} + +AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) { + Bits |= Val; + return *this; +} + +AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) { + if (Align == 0) return *this; + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x40000000 && "Alignment too large."); + Bits |= (Log2_32(Align) + 1) << 16; + return *this; +} +AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align){ + // Default alignment, allow the target to define how to align it. + if (Align == 0) return *this; + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x100 && "Alignment too large."); + Bits |= (Log2_32(Align) + 1) << 26; + return *this; +} + +AttrBuilder &AttrBuilder::removeAttribute(Attributes::AttrVal Val) { + Bits &= ~AttributesImpl::getAttrMask(Val); + return *this; +} + +AttrBuilder &AttrBuilder::addAttributes(const Attributes &A) { + Bits |= A.Raw(); + return *this; +} + +AttrBuilder &AttrBuilder::removeAttributes(const Attributes &A){ + Bits &= ~A.Raw(); + return *this; +} + +bool AttrBuilder::hasAttribute(Attributes::AttrVal A) const { + return Bits & AttributesImpl::getAttrMask(A); +} + +bool AttrBuilder::hasAttributes() const { + return Bits != 0; +} +bool AttrBuilder::hasAttributes(const Attributes &A) const { + return Bits & A.Raw(); +} +bool AttrBuilder::hasAlignmentAttr() const { + return Bits & AttributesImpl::getAttrMask(Attributes::Alignment); +} + +uint64_t AttrBuilder::getAlignment() const { + if (!hasAlignmentAttr()) + return 0; + return 1U << + (((Bits & AttributesImpl::getAttrMask(Attributes::Alignment)) >> 16) - 1); +} + +uint64_t AttrBuilder::getStackAlignment() const { + if (!hasAlignmentAttr()) + return 0; + return 1U << + (((Bits & AttributesImpl::getAttrMask(Attributes::StackAlignment))>>26)-1); } //===----------------------------------------------------------------------===// -// AttributeListImpl Definition +// AttributeImpl Definition //===----------------------------------------------------------------------===// -namespace llvm { - class AttributeListImpl; +uint64_t AttributesImpl::getAttrMask(uint64_t Val) { + switch (Val) { + case Attributes::None: return 0; + case Attributes::ZExt: return 1 << 0; + case Attributes::SExt: return 1 << 1; + case Attributes::NoReturn: return 1 << 2; + case Attributes::InReg: return 1 << 3; + case Attributes::StructRet: return 1 << 4; + case Attributes::NoUnwind: return 1 << 5; + case Attributes::NoAlias: return 1 << 6; + case Attributes::ByVal: return 1 << 7; + case Attributes::Nest: return 1 << 8; + case Attributes::ReadNone: return 1 << 9; + case Attributes::ReadOnly: return 1 << 10; + case Attributes::NoInline: return 1 << 11; + case Attributes::AlwaysInline: 
return 1 << 12; + case Attributes::OptimizeForSize: return 1 << 13; + case Attributes::StackProtect: return 1 << 14; + case Attributes::StackProtectReq: return 1 << 15; + case Attributes::Alignment: return 31 << 16; + case Attributes::NoCapture: return 1 << 21; + case Attributes::NoRedZone: return 1 << 22; + case Attributes::NoImplicitFloat: return 1 << 23; + case Attributes::Naked: return 1 << 24; + case Attributes::InlineHint: return 1 << 25; + case Attributes::StackAlignment: return 7 << 26; + case Attributes::ReturnsTwice: return 1 << 29; + case Attributes::UWTable: return 1 << 30; + case Attributes::NonLazyBind: return 1U << 31; + case Attributes::AddressSafety: return 1ULL << 32; + case Attributes::MinSize: return 1ULL << 33; + } + llvm_unreachable("Unsupported attribute type"); } -static ManagedStatic<FoldingSet<AttributeListImpl> > AttributesLists; +bool AttributesImpl::hasAttribute(uint64_t A) const { + return (Bits & getAttrMask(A)) != 0; +} -namespace llvm { -static ManagedStatic<sys::SmartMutex<true> > ALMutex; +bool AttributesImpl::hasAttributes() const { + return Bits != 0; +} -class AttributeListImpl : public FoldingSetNode { - sys::cas_flag RefCount; - - // AttributesList is uniqued, these should not be publicly available. - void operator=(const AttributeListImpl &); // Do not implement - AttributeListImpl(const AttributeListImpl &); // Do not implement - ~AttributeListImpl(); // Private implementation -public: - SmallVector<AttributeWithIndex, 4> Attrs; - - AttributeListImpl(ArrayRef<AttributeWithIndex> attrs) - : Attrs(attrs.begin(), attrs.end()) { - RefCount = 0; - } - - void AddRef() { - sys::SmartScopedLock<true> Lock(*ALMutex); - ++RefCount; - } - void DropRef() { - sys::SmartScopedLock<true> Lock(*ALMutex); - if (!AttributesLists.isConstructed()) - return; - sys::cas_flag new_val = --RefCount; - if (new_val == 0) - delete this; - } - - void Profile(FoldingSetNodeID &ID) const { - Profile(ID, Attrs); - } - static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeWithIndex> Attrs){ - for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { - ID.AddInteger(Attrs[i].Attrs.Raw()); - ID.AddInteger(Attrs[i].Index); - } - } -}; +bool AttributesImpl::hasAttributes(const Attributes &A) const { + return Bits & A.Raw(); // FIXME: Raw() won't work here in the future. } -AttributeListImpl::~AttributeListImpl() { - // NOTE: Lock must be acquired by caller. - AttributesLists->RemoveNode(this); +uint64_t AttributesImpl::getAlignment() const { + return Bits & getAttrMask(Attributes::Alignment); } +uint64_t AttributesImpl::getStackAlignment() const { + return Bits & getAttrMask(Attributes::StackAlignment); +} -AttrListPtr AttrListPtr::get(ArrayRef<AttributeWithIndex> Attrs) { +//===----------------------------------------------------------------------===// +// AttributeListImpl Definition +//===----------------------------------------------------------------------===// + +AttrListPtr AttrListPtr::get(LLVMContext &C, + ArrayRef<AttributeWithIndex> Attrs) { // If there are no attributes then return a null AttributesList pointer. if (Attrs.empty()) return AttrListPtr(); - + #ifndef NDEBUG for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { - assert(Attrs[i].Attrs != Attribute::None && + assert(Attrs[i].Attrs.hasAttributes() && "Pointless attribute!"); assert((!i || Attrs[i-1].Index < Attrs[i].Index) && "Misordered AttributesList!"); } #endif - + // Otherwise, build a key to look up the existing attributes. 
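AttrListPtr::get now takes the LLVMContext as well and uniques whole attribute lists through LLVMContextImpl in the same way. A rough sketch of assembling a list for a function whose first parameter is zeroext and which is itself nounwind, using the usual index convention (0 = return value, 1..n = parameters, AttrListPtr::FunctionIndex = the function); Ctx and F are placeholder names for an existing LLVMContext and Function:

AttrBuilder PB, FB;
PB.addAttribute(Attributes::ZExt);
FB.addAttribute(Attributes::NoUnwind);
AttributeWithIndex AWI[] = {   // indices must stay in ascending order
  AttributeWithIndex::get(1, Attributes::get(Ctx, PB)),
  AttributeWithIndex::get(AttrListPtr::FunctionIndex, Attributes::get(Ctx, FB))
};
F->setAttributes(AttrListPtr::get(Ctx, AWI));  // Ctx, F: placeholder names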
+ LLVMContextImpl *pImpl = C.pImpl; FoldingSetNodeID ID; AttributeListImpl::Profile(ID, Attrs); - void *InsertPos; - - sys::SmartScopedLock<true> Lock(*ALMutex); - - AttributeListImpl *PAL = - AttributesLists->FindNodeOrInsertPos(ID, InsertPos); - + + void *InsertPoint; + AttributeListImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID, + InsertPoint); + // If we didn't find any existing attributes of the same shape then // create a new one and insert it. - if (!PAL) { - PAL = new AttributeListImpl(Attrs); - AttributesLists->InsertNode(PAL, InsertPos); + if (!PA) { + PA = new AttributeListImpl(Attrs); + pImpl->AttrsLists.InsertNode(PA, InsertPoint); } - + // Return the AttributesList that we found or created. - return AttrListPtr(PAL); + return AttrListPtr(PA); } - //===----------------------------------------------------------------------===// // AttrListPtr Method Implementations //===----------------------------------------------------------------------===// -AttrListPtr::AttrListPtr(AttributeListImpl *LI) : AttrList(LI) { - if (LI) LI->AddRef(); -} - -AttrListPtr::AttrListPtr(const AttrListPtr &P) : AttrList(P.AttrList) { - if (AttrList) AttrList->AddRef(); -} - const AttrListPtr &AttrListPtr::operator=(const AttrListPtr &RHS) { - sys::SmartScopedLock<true> Lock(*ALMutex); if (AttrList == RHS.AttrList) return *this; - if (AttrList) AttrList->DropRef(); + AttrList = RHS.AttrList; - if (AttrList) AttrList->AddRef(); return *this; } -AttrListPtr::~AttrListPtr() { - if (AttrList) AttrList->DropRef(); -} - -/// getNumSlots - Return the number of slots used in this attribute list. +/// getNumSlots - Return the number of slots used in this attribute list. /// This is the number of arguments that have an attribute set on them /// (including the function itself). unsigned AttrListPtr::getNumSlots() const { @@ -245,48 +415,60 @@ const AttributeWithIndex &AttrListPtr::getSlot(unsigned Slot) const { return AttrList->Attrs[Slot]; } - -/// getAttributes - The attributes for the specified index are -/// returned. Attributes for the result are denoted with Idx = 0. -/// Function notes are denoted with idx = ~0. +/// getAttributes - The attributes for the specified index are returned. +/// Attributes for the result are denoted with Idx = 0. Function notes are +/// denoted with idx = ~0. Attributes AttrListPtr::getAttributes(unsigned Idx) const { - if (AttrList == 0) return Attribute::None; - + if (AttrList == 0) return Attributes(); + const SmallVector<AttributeWithIndex, 4> &Attrs = AttrList->Attrs; for (unsigned i = 0, e = Attrs.size(); i != e && Attrs[i].Index <= Idx; ++i) if (Attrs[i].Index == Idx) return Attrs[i].Attrs; - return Attribute::None; + + return Attributes(); } /// hasAttrSomewhere - Return true if the specified attribute is set for at /// least one parameter or for the return value. -bool AttrListPtr::hasAttrSomewhere(Attributes Attr) const { +bool AttrListPtr::hasAttrSomewhere(Attributes::AttrVal Attr) const { if (AttrList == 0) return false; - + const SmallVector<AttributeWithIndex, 4> &Attrs = AttrList->Attrs; for (unsigned i = 0, e = Attrs.size(); i != e; ++i) - if (Attrs[i].Attrs & Attr) + if (Attrs[i].Attrs.hasAttribute(Attr)) return true; + return false; } +unsigned AttrListPtr::getNumAttrs() const { + return AttrList ? 
AttrList->Attrs.size() : 0; +} + +Attributes &AttrListPtr::getAttributesAtIndex(unsigned i) const { + assert(AttrList && "Trying to get an attribute from an empty list!"); + assert(i < AttrList->Attrs.size() && "Index out of range!"); + return AttrList->Attrs[i].Attrs; +} -AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const { +AttrListPtr AttrListPtr::addAttr(LLVMContext &C, unsigned Idx, + Attributes Attrs) const { Attributes OldAttrs = getAttributes(Idx); #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. // For now, say we can't change a known alignment. - Attributes OldAlign = OldAttrs & Attribute::Alignment; - Attributes NewAlign = Attrs & Attribute::Alignment; + unsigned OldAlign = OldAttrs.getAlignment(); + unsigned NewAlign = Attrs.getAlignment(); assert((!OldAlign || !NewAlign || OldAlign == NewAlign) && "Attempt to change alignment!"); #endif - - Attributes NewAttrs = OldAttrs | Attrs; - if (NewAttrs == OldAttrs) + + AttrBuilder NewAttrs = + AttrBuilder(OldAttrs).addAttributes(Attrs); + if (NewAttrs == AttrBuilder(OldAttrs)) return *this; - + SmallVector<AttributeWithIndex, 8> NewAttrList; if (AttrList == 0) NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs)); @@ -299,61 +481,67 @@ AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const { // If there are attributes already at this index, merge them in. if (i != e && OldAttrList[i].Index == Idx) { - Attrs |= OldAttrList[i].Attrs; + Attrs = + Attributes::get(C, AttrBuilder(Attrs). + addAttributes(OldAttrList[i].Attrs)); ++i; } - + NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs)); - + // Copy attributes for arguments after this one. - NewAttrList.insert(NewAttrList.end(), + NewAttrList.insert(NewAttrList.end(), OldAttrList.begin()+i, OldAttrList.end()); } - - return get(NewAttrList); + + return get(C, NewAttrList); } -AttrListPtr AttrListPtr::removeAttr(unsigned Idx, Attributes Attrs) const { +AttrListPtr AttrListPtr::removeAttr(LLVMContext &C, unsigned Idx, + Attributes Attrs) const { #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. // For now, say we can't pass in alignment, which no current use does. - assert(!(Attrs & Attribute::Alignment) && "Attempt to exclude alignment!"); + assert(!Attrs.hasAttribute(Attributes::Alignment) && + "Attempt to exclude alignment!"); #endif if (AttrList == 0) return AttrListPtr(); - + Attributes OldAttrs = getAttributes(Idx); - Attributes NewAttrs = OldAttrs & ~Attrs; - if (NewAttrs == OldAttrs) + AttrBuilder NewAttrs = + AttrBuilder(OldAttrs).removeAttributes(Attrs); + if (NewAttrs == AttrBuilder(OldAttrs)) return *this; SmallVector<AttributeWithIndex, 8> NewAttrList; const SmallVector<AttributeWithIndex, 4> &OldAttrList = AttrList->Attrs; unsigned i = 0, e = OldAttrList.size(); - + // Copy attributes for arguments before this one. for (; i != e && OldAttrList[i].Index < Idx; ++i) NewAttrList.push_back(OldAttrList[i]); - + // If there are attributes already at this index, merge them in. assert(OldAttrList[i].Index == Idx && "Attribute isn't set?"); - Attrs = OldAttrList[i].Attrs & ~Attrs; + Attrs = Attributes::get(C, AttrBuilder(OldAttrList[i].Attrs). + removeAttributes(Attrs)); ++i; - if (Attrs) // If any attributes left for this parameter, add them. + if (Attrs.hasAttributes()) // If any attributes left for this param, add them. NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs)); - + // Copy attributes for arguments after this one. 
- NewAttrList.insert(NewAttrList.end(), + NewAttrList.insert(NewAttrList.end(), OldAttrList.begin()+i, OldAttrList.end()); - - return get(NewAttrList); + + return get(C, NewAttrList); } void AttrListPtr::dump() const { dbgs() << "PAL[ "; for (unsigned i = 0; i < getNumSlots(); ++i) { const AttributeWithIndex &PAWI = getSlot(i); - dbgs() << "{" << PAWI.Index << "," << PAWI.Attrs << "} "; + dbgs() << "{" << PAWI.Index << "," << PAWI.Attrs.getAsString() << "} "; } - + dbgs() << "]\n"; } diff --git a/lib/VMCore/AttributesImpl.h b/lib/VMCore/AttributesImpl.h new file mode 100644 index 0000000..5c107e1 --- /dev/null +++ b/lib/VMCore/AttributesImpl.h @@ -0,0 +1,71 @@ +//===-- AttributesImpl.h - Attributes Internals -----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various helper methods and classes used by LLVMContextImpl +// for creating and managing attributes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ATTRIBUTESIMPL_H +#define LLVM_ATTRIBUTESIMPL_H + +#include "llvm/Attributes.h" +#include "llvm/ADT/FoldingSet.h" + +namespace llvm { + +class AttributesImpl : public FoldingSetNode { + uint64_t Bits; // FIXME: We will be expanding this. +public: + AttributesImpl(uint64_t bits) : Bits(bits) {} + + bool hasAttribute(uint64_t A) const; + + bool hasAttributes() const; + bool hasAttributes(const Attributes &A) const; + + uint64_t getAlignment() const; + uint64_t getStackAlignment() const; + + uint64_t Raw() const { return Bits; } // FIXME: Remove. + + static uint64_t getAttrMask(uint64_t Val); + + void Profile(FoldingSetNodeID &ID) const { + Profile(ID, Bits); + } + static void Profile(FoldingSetNodeID &ID, uint64_t Bits) { + ID.AddInteger(Bits); + } +}; + +class AttributeListImpl : public FoldingSetNode { + // AttributesList is uniqued, these should not be publicly available. 
+ void operator=(const AttributeListImpl &) LLVM_DELETED_FUNCTION; + AttributeListImpl(const AttributeListImpl &) LLVM_DELETED_FUNCTION; +public: + SmallVector<AttributeWithIndex, 4> Attrs; + + AttributeListImpl(ArrayRef<AttributeWithIndex> attrs) + : Attrs(attrs.begin(), attrs.end()) {} + + void Profile(FoldingSetNodeID &ID) const { + Profile(ID, Attrs); + } + static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeWithIndex> Attrs){ + for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { + ID.AddInteger(Attrs[i].Attrs.Raw()); + ID.AddInteger(Attrs[i].Index); + } + } +}; + +} // end llvm namespace + +#endif diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp index 094ca75..5fff460 100644 --- a/lib/VMCore/AutoUpgrade.cpp +++ b/lib/VMCore/AutoUpgrade.cpp @@ -148,7 +148,8 @@ bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) { if (NewFn) F = NewFn; if (unsigned id = F->getIntrinsicID()) - F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id)); + F->setAttributes(Intrinsic::getAttributes(F->getContext(), + (Intrinsic::ID)id)); return Upgraded; } diff --git a/lib/VMCore/CMakeLists.txt b/lib/VMCore/CMakeLists.txt index 6a20be6..06eab0e 100644 --- a/lib/VMCore/CMakeLists.txt +++ b/lib/VMCore/CMakeLists.txt @@ -1,5 +1,3 @@ -set(LLVM_REQUIRES_RTTI 1) - add_llvm_library(LLVMCore AsmWriter.cpp Attributes.cpp @@ -8,6 +6,7 @@ add_llvm_library(LLVMCore ConstantFold.cpp Constants.cpp Core.cpp + DataLayout.cpp DebugInfo.cpp DebugLoc.cpp DIBuilder.cpp @@ -32,6 +31,7 @@ add_llvm_library(LLVMCore PrintModulePass.cpp Type.cpp TypeFinder.cpp + TargetTransformInfo.cpp Use.cpp User.cpp Value.cpp @@ -42,7 +42,7 @@ add_llvm_library(LLVMCore # Workaround: It takes over 20 minutes to compile with msvc10. # FIXME: Suppressing optimizations to core libraries would not be good thing. -if( MSVC_VERSION EQUAL 1600 ) +if( MSVC_VERSION LESS 1700 ) set_property( SOURCE Function.cpp PROPERTY COMPILE_FLAGS "/Og-" diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp index 8e82876..fe3edac 100644 --- a/lib/VMCore/ConstantFold.cpp +++ b/lib/VMCore/ConstantFold.cpp @@ -12,7 +12,7 @@ // ConstantExpr::get* methods to automatically fold constants when possible. // // The current constant folding implementation is implemented in two pieces: the -// pieces that don't need TargetData, and the pieces that do. This is to avoid +// pieces that don't need DataLayout, and the pieces that do. This is to avoid // a dependence in VMCore on Target. // //===----------------------------------------------------------------------===// @@ -87,9 +87,13 @@ foldConstantCastPair( Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode()); Instruction::CastOps secondOp = Instruction::CastOps(opc); + // Assume that pointers are never more than 64 bits wide. + IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext()); + // Let CastInst::isEliminableCastPair do the heavy lifting. return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy, - Type::getInt64Ty(DstTy->getContext())); + FakeIntPtrTy, FakeIntPtrTy, + FakeIntPtrTy); } static Constant *FoldBitCast(Constant *V, Type *DestTy) { @@ -514,10 +518,6 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, return UndefValue::get(DestTy); } - // No compile-time operations on this type yet. 
- if (V->getType()->isPPC_FP128Ty() || DestTy->isPPC_FP128Ty()) - return 0; - if (V->isNullValue() && !DestTy->isX86_MMXTy()) return Constant::getNullValue(DestTy); @@ -576,6 +576,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, DestTy->isDoubleTy() ? APFloat::IEEEdouble : DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended : DestTy->isFP128Ty() ? APFloat::IEEEquad : + DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble : APFloat::Bogus, APFloat::rmNearestTiesToEven, &ignored); return ConstantFP::get(V->getContext(), Val); @@ -646,7 +647,8 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, case Instruction::SIToFP: if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { APInt api = CI->getValue(); - APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()), true); + APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()), + !DestTy->isPPC_FP128Ty() /* isEEEE */); (void)apf.convertFromAPInt(api, opc==Instruction::SIToFP, APFloat::rmNearestTiesToEven); @@ -867,10 +869,6 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg, Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1, Constant *C2) { - // No compile-time operations on this type yet. - if (C1->getType()->isPPC_FP128Ty()) - return 0; - // Handle UndefValue up front. if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) { switch (Opcode) { @@ -1273,10 +1271,6 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) { assert(V1->getType() == V2->getType() && "Cannot compare values of different types!"); - // No compile-time operations on this type yet. - if (V1->getType()->isPPC_FP128Ty()) - return FCmpInst::BAD_FCMP_PREDICATE; - // Handle degenerate case quickly if (V1 == V2) return FCmpInst::FCMP_OEQ; @@ -1602,10 +1596,6 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred, return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(pred)); } - // No compile-time operations on this type yet. - if (C1->getType()->isPPC_FP128Ty()) - return 0; - // icmp eq/ne(null,GV) -> false/true if (C1->isNullValue()) { if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2)) diff --git a/lib/VMCore/Constants.cpp b/lib/VMCore/Constants.cpp index a4e21e1..edd6a73 100644 --- a/lib/VMCore/Constants.cpp +++ b/lib/VMCore/Constants.cpp @@ -245,6 +245,33 @@ bool Constant::canTrap() const { } } +/// isThreadDependent - Return true if the value can vary between threads. +bool Constant::isThreadDependent() const { + SmallPtrSet<const Constant*, 64> Visited; + SmallVector<const Constant*, 64> WorkList; + WorkList.push_back(this); + Visited.insert(this); + + while (!WorkList.empty()) { + const Constant *C = WorkList.pop_back_val(); + + if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) { + if (GV->isThreadLocal()) + return true; + } + + for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) { + const Constant *D = dyn_cast<Constant>(C->getOperand(I)); + if (!D) + continue; + if (Visited.insert(D)) + WorkList.push_back(D); + } + } + + return false; +} + /// isConstantUsed - Return true if the constant has users other than constant /// exprs and other dangling things. bool Constant::isConstantUsed() const { diff --git a/lib/VMCore/ConstantsContext.h b/lib/VMCore/ConstantsContext.h index 8903a8f..996eb12 100644 --- a/lib/VMCore/ConstantsContext.h +++ b/lib/VMCore/ConstantsContext.h @@ -33,7 +33,7 @@ struct ConstantTraits; /// behind the scenes to implement unary constant exprs. 
class UnaryConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -50,7 +50,7 @@ public: /// behind the scenes to implement binary constant exprs. class BinaryConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -71,7 +71,7 @@ public: /// behind the scenes to implement select constant exprs. class SelectConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly three operands void *operator new(size_t s) { @@ -92,7 +92,7 @@ public: /// extractelement constant exprs. class ExtractElementConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -113,7 +113,7 @@ public: /// insertelement constant exprs. class InsertElementConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly three operands void *operator new(size_t s) { @@ -135,7 +135,7 @@ public: /// shufflevector constant exprs. class ShuffleVectorConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly three operands void *operator new(size_t s) { @@ -160,7 +160,7 @@ public: /// extractvalue constant exprs. class ExtractValueConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -186,7 +186,7 @@ public: /// insertvalue constant exprs. class InsertValueConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -234,7 +234,7 @@ public: // needed in order to store the predicate value for these instructions. 
class CompareConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -352,18 +352,21 @@ struct ExprMapKeyType { struct InlineAsmKeyType { InlineAsmKeyType(StringRef AsmString, StringRef Constraints, bool hasSideEffects, - bool isAlignStack) + bool isAlignStack, InlineAsm::AsmDialect asmDialect) : asm_string(AsmString), constraints(Constraints), - has_side_effects(hasSideEffects), is_align_stack(isAlignStack) {} + has_side_effects(hasSideEffects), is_align_stack(isAlignStack), + asm_dialect(asmDialect) {} std::string asm_string; std::string constraints; bool has_side_effects; bool is_align_stack; + InlineAsm::AsmDialect asm_dialect; bool operator==(const InlineAsmKeyType& that) const { return this->asm_string == that.asm_string && this->constraints == that.constraints && this->has_side_effects == that.has_side_effects && - this->is_align_stack == that.is_align_stack; + this->is_align_stack == that.is_align_stack && + this->asm_dialect == that.asm_dialect; } bool operator<(const InlineAsmKeyType& that) const { if (this->asm_string != that.asm_string) @@ -374,6 +377,8 @@ struct InlineAsmKeyType { return this->has_side_effects < that.has_side_effects; if (this->is_align_stack != that.is_align_stack) return this->is_align_stack < that.is_align_stack; + if (this->asm_dialect != that.asm_dialect) + return this->asm_dialect < that.asm_dialect; return false; } @@ -490,7 +495,8 @@ template<> struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType> { static InlineAsm *create(PointerType *Ty, const InlineAsmKeyType &Key) { return new InlineAsm(Ty, Key.asm_string, Key.constraints, - Key.has_side_effects, Key.is_align_stack); + Key.has_side_effects, Key.is_align_stack, + Key.asm_dialect); } }; @@ -499,7 +505,8 @@ struct ConstantKeyData<InlineAsm> { typedef InlineAsmKeyType ValType; static ValType getValType(InlineAsm *Asm) { return InlineAsmKeyType(Asm->getAsmString(), Asm->getConstraintString(), - Asm->hasSideEffects(), Asm->isAlignStack()); + Asm->hasSideEffects(), Asm->isAlignStack(), + Asm->getDialect()); } }; diff --git a/lib/VMCore/Core.cpp b/lib/VMCore/Core.cpp index 972db3c..847bc13 100644 --- a/lib/VMCore/Core.cpp +++ b/lib/VMCore/Core.cpp @@ -568,6 +568,19 @@ const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) { return 0; } +unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V) +{ + return cast<MDNode>(unwrap(V))->getNumOperands(); +} + +void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest) +{ + const MDNode *N = cast<MDNode>(unwrap(V)); + const unsigned numOperands = N->getNumOperands(); + for (unsigned i = 0; i < numOperands; i++) + Dest[i] = wrap(N->getOperand(i)); +} + unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name) { if (NamedMDNode *N = unwrap(M)->getNamedMetadata(name)) { @@ -1084,6 +1097,8 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) { return LLVMLinkOnceAnyLinkage; case GlobalValue::LinkOnceODRLinkage: return LLVMLinkOnceODRLinkage; + case GlobalValue::LinkOnceODRAutoHideLinkage: + return LLVMLinkOnceODRAutoHideLinkage; case GlobalValue::WeakAnyLinkage: return LLVMWeakAnyLinkage; case GlobalValue::WeakODRLinkage: @@ -1098,8 +1113,6 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) { return LLVMLinkerPrivateLinkage; case GlobalValue::LinkerPrivateWeakLinkage: return LLVMLinkerPrivateWeakLinkage; - case 
GlobalValue::LinkerPrivateWeakDefAutoLinkage: - return LLVMLinkerPrivateWeakDefAutoLinkage; case GlobalValue::DLLImportLinkage: return LLVMDLLImportLinkage; case GlobalValue::DLLExportLinkage: @@ -1129,6 +1142,9 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) { case LLVMLinkOnceODRLinkage: GV->setLinkage(GlobalValue::LinkOnceODRLinkage); break; + case LLVMLinkOnceODRAutoHideLinkage: + GV->setLinkage(GlobalValue::LinkOnceODRAutoHideLinkage); + break; case LLVMWeakAnyLinkage: GV->setLinkage(GlobalValue::WeakAnyLinkage); break; @@ -1150,9 +1166,6 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) { case LLVMLinkerPrivateWeakLinkage: GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage); break; - case LLVMLinkerPrivateWeakDefAutoLinkage: - GV->setLinkage(GlobalValue::LinkerPrivateWeakDefAutoLinkage); - break; case LLVMDLLImportLinkage: GV->setLinkage(GlobalValue::DLLImportLinkage); break; @@ -1368,14 +1381,20 @@ void LLVMSetGC(LLVMValueRef Fn, const char *GC) { void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { Function *Func = unwrap<Function>(Fn); const AttrListPtr PAL = Func->getAttributes(); - const AttrListPtr PALnew = PAL.addAttr(~0U, Attributes(PA)); + AttrBuilder B(PA); + const AttrListPtr PALnew = + PAL.addAttr(Func->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(Func->getContext(), B)); Func->setAttributes(PALnew); } void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { Function *Func = unwrap<Function>(Fn); const AttrListPtr PAL = Func->getAttributes(); - const AttrListPtr PALnew = PAL.removeAttr(~0U, Attributes(PA)); + AttrBuilder B(PA); + const AttrListPtr PALnew = + PAL.removeAttr(Func->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(Func->getContext(), B)); Func->setAttributes(PALnew); } @@ -1445,11 +1464,15 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) { } void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA) { - unwrap<Argument>(Arg)->addAttr(Attributes(PA)); + Argument *A = unwrap<Argument>(Arg); + AttrBuilder B(PA); + A->addAttr(Attributes::get(A->getContext(), B)); } void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA) { - unwrap<Argument>(Arg)->removeAttr(Attributes(PA)); + Argument *A = unwrap<Argument>(Arg); + AttrBuilder B(PA); + A->removeAttr(Attributes::get(A->getContext(), B)); } LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) { @@ -1461,8 +1484,10 @@ LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) { void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) { - unwrap<Argument>(Arg)->addAttr( - Attribute::constructAlignmentFromInt(align)); + AttrBuilder B; + B.addAlignmentAttr(align); + unwrap<Argument>(Arg)->addAttr(Attributes:: + get(unwrap<Argument>(Arg)->getContext(), B)); } /*--.. 
Operations on basic blocks ..........................................--*/ @@ -1651,23 +1676,28 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) { void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute PA) { CallSite Call = CallSite(unwrap<Instruction>(Instr)); + AttrBuilder B(PA); Call.setAttributes( - Call.getAttributes().addAttr(index, Attributes(PA))); + Call.getAttributes().addAttr(Call->getContext(), index, + Attributes::get(Call->getContext(), B))); } void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute PA) { CallSite Call = CallSite(unwrap<Instruction>(Instr)); + AttrBuilder B(PA); Call.setAttributes( - Call.getAttributes().removeAttr(index, Attributes(PA))); + Call.getAttributes().removeAttr(Call->getContext(), index, + Attributes::get(Call->getContext(), B))); } void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index, unsigned align) { CallSite Call = CallSite(unwrap<Instruction>(Instr)); - Call.setAttributes( - Call.getAttributes().addAttr(index, - Attribute::constructAlignmentFromInt(align))); + AttrBuilder B; + B.addAlignmentAttr(align); + Call.setAttributes(Call.getAttributes().addAttr(Call->getContext(), index, + Attributes::get(Call->getContext(), B))); } /*--.. Operations on call instructions (only) ..............................--*/ diff --git a/lib/VMCore/DIBuilder.cpp b/lib/VMCore/DIBuilder.cpp index f5894e9..152b825 100644 --- a/lib/VMCore/DIBuilder.cpp +++ b/lib/VMCore/DIBuilder.cpp @@ -492,7 +492,8 @@ DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name, NULL, Elements, ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang), - Constant::getNullValue(Type::getInt32Ty(VMContext)) + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), }; return DIType(MDNode::get(VMContext, Elts)); } @@ -550,7 +551,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name, uint64_t SizeInBits, uint64_t AlignInBits, DIArray Elements, - DIType ClassType, unsigned Flags) { + DIType ClassType) { // TAG_enumeration_type is encoded in DICompositeType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type), @@ -561,7 +562,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name, ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), ConstantInt::get(Type::getInt32Ty(VMContext), 0), - ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), ClassType, Elements, ConstantInt::get(Type::getInt32Ty(VMContext), 0), @@ -640,6 +641,30 @@ DIType DIBuilder::createArtificialType(DIType Ty) { return DIType(MDNode::get(VMContext, Elts)); } +/// createArtificialType - Create a new DIType with "artificial" flag set. +DIType DIBuilder::createObjectPointerType(DIType Ty) { + if (Ty.isObjectPointer()) + return Ty; + + SmallVector<Value *, 9> Elts; + MDNode *N = Ty; + assert (N && "Unexpected input DIType!"); + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (Value *V = N->getOperand(i)) + Elts.push_back(V); + else + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); + } + + unsigned CurFlags = Ty.getFlags(); + CurFlags = CurFlags | (DIType::FlagObjectPointer | DIType::FlagArtificial); + + // Flags are stored at this slot. 
+ Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags); + + return DIType(MDNode::get(VMContext, Elts)); +} + /// retainType - Retain DIType in a module even if it is not referenced /// through debug info anchors. void DIBuilder::retainType(DIType T) { @@ -682,7 +707,9 @@ DIType DIBuilder::createTemporaryType(DIFile F) { /// can be RAUW'd if the full type is seen. DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope, DIFile F, - unsigned Line, unsigned RuntimeLang) { + unsigned Line, unsigned RuntimeLang, + uint64_t SizeInBits, + uint64_t AlignInBits) { // Create a temporary MDNode. Value *Elts[] = { GetTagConstant(VMContext, Tag), @@ -690,9 +717,8 @@ DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, MDString::get(VMContext, Name), F, ConstantInt::get(Type::getInt32Ty(VMContext), Line), - // To ease transition include sizes etc of 0. - ConstantInt::get(Type::getInt32Ty(VMContext), 0), - ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), ConstantInt::get(Type::getInt32Ty(VMContext), 0), ConstantInt::get(Type::getInt32Ty(VMContext), DIDescriptor::FlagFwdDecl), diff --git a/lib/VMCore/DataLayout.cpp b/lib/VMCore/DataLayout.cpp new file mode 100644 index 0000000..19cf0f5 --- /dev/null +++ b/lib/VMCore/DataLayout.cpp @@ -0,0 +1,749 @@ +//===-- DataLayout.cpp - Data size & alignment routines --------------------==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines layout properties related to datatype size/offset/alignment +// information. +// +// This structure should be created once, filled in if the defaults are not +// correct and then passed around by const&. None of the members functions +// require modification to the object. +// +//===----------------------------------------------------------------------===// + +#include "llvm/DataLayout.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Mutex.h" +#include "llvm/ADT/DenseMap.h" +#include <algorithm> +#include <cstdlib> +using namespace llvm; + +// Handle the Pass registration stuff necessary to use DataLayout's. + +// Register the default SparcV9 implementation... +INITIALIZE_PASS(DataLayout, "datalayout", "Data Layout", false, true) +char DataLayout::ID = 0; + +//===----------------------------------------------------------------------===// +// Support for StructLayout +//===----------------------------------------------------------------------===// + +StructLayout::StructLayout(StructType *ST, const DataLayout &TD) { + assert(!ST->isOpaque() && "Cannot get layout of opaque structs"); + StructAlignment = 0; + StructSize = 0; + NumElements = ST->getNumElements(); + + // Loop over each of the elements, placing them in memory. + for (unsigned i = 0, e = NumElements; i != e; ++i) { + Type *Ty = ST->getElementType(i); + unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty); + + // Add padding if necessary to align the data element properly. 
+ if ((StructSize & (TyAlign-1)) != 0) + StructSize = DataLayout::RoundUpAlignment(StructSize, TyAlign); + + // Keep track of maximum alignment constraint. + StructAlignment = std::max(TyAlign, StructAlignment); + + MemberOffsets[i] = StructSize; + StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item + } + + // Empty structures have alignment of 1 byte. + if (StructAlignment == 0) StructAlignment = 1; + + // Add padding to the end of the struct so that it could be put in an array + // and all array elements would be aligned correctly. + if ((StructSize & (StructAlignment-1)) != 0) + StructSize = DataLayout::RoundUpAlignment(StructSize, StructAlignment); +} + + +/// getElementContainingOffset - Given a valid offset into the structure, +/// return the structure index that contains it. +unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const { + const uint64_t *SI = + std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset); + assert(SI != &MemberOffsets[0] && "Offset not in structure type!"); + --SI; + assert(*SI <= Offset && "upper_bound didn't work"); + assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) && + (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) && + "Upper bound didn't work!"); + + // Multiple fields can have the same offset if any of them are zero sized. + // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop + // at the i32 element, because it is the last element at that offset. This is + // the right one to return, because anything after it will have a higher + // offset, implying that this element is non-empty. + return SI-&MemberOffsets[0]; +} + +//===----------------------------------------------------------------------===// +// LayoutAlignElem, LayoutAlign support +//===----------------------------------------------------------------------===// + +LayoutAlignElem +LayoutAlignElem::get(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + LayoutAlignElem retval; + retval.AlignType = align_type; + retval.ABIAlign = abi_align; + retval.PrefAlign = pref_align; + retval.TypeBitWidth = bit_width; + return retval; +} + +bool +LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const { + return (AlignType == rhs.AlignType + && ABIAlign == rhs.ABIAlign + && PrefAlign == rhs.PrefAlign + && TypeBitWidth == rhs.TypeBitWidth); +} + +const LayoutAlignElem +DataLayout::InvalidAlignmentElem = + LayoutAlignElem::get((AlignTypeEnum) -1, 0, 0, 0); + +//===----------------------------------------------------------------------===// +// PointerAlignElem, PointerAlign support +//===----------------------------------------------------------------------===// + +PointerAlignElem +PointerAlignElem::get(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + PointerAlignElem retval; + retval.AddressSpace = addr_space; + retval.ABIAlign = abi_align; + retval.PrefAlign = pref_align; + retval.TypeBitWidth = bit_width; + return retval; +} + +bool +PointerAlignElem::operator==(const PointerAlignElem &rhs) const { + return (ABIAlign == rhs.ABIAlign + && AddressSpace == rhs.AddressSpace + && PrefAlign == rhs.PrefAlign + && TypeBitWidth == rhs.TypeBitWidth); +} + +const PointerAlignElem +DataLayout::InvalidPointerElem = PointerAlignElem::get(~0U, 0U, 0U, 0U); + 
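The StructLayout constructor above rounds each member offset up to that member's ABI alignment, tracks the largest alignment it has seen, and finally pads the total size so that arrays of the struct stay aligned; getElementContainingOffset then maps a byte offset back to a field index with a binary search over the recorded offsets. The following standalone sketch (plain C++, not the LLVM API; the byte sizes and alignments are illustrative stand-ins for what a typical 64-bit DataLayout would report) reproduces the same offset arithmetic for a struct shaped like { i8, i32, i16 }:

#include <cstdio>

struct Field { unsigned Size, Align; };            // sizes in bytes, illustrative values

static unsigned long long roundUp(unsigned long long V, unsigned Align) {
  return (V + Align - 1) / Align * Align;          // same effect as RoundUpAlignment
}

int main() {
  const Field Fields[] = { {1, 1}, {4, 4}, {2, 2} };   // roughly { i8, i32, i16 }
  const unsigned NumFields = sizeof(Fields) / sizeof(Fields[0]);

  unsigned long long Size = 0;
  unsigned MaxAlign = 1;
  for (unsigned i = 0; i != NumFields; ++i) {
    Size = roundUp(Size, Fields[i].Align);         // pad so this member starts aligned
    std::printf("field %u at offset %llu\n", i, Size);
    Size += Fields[i].Size;
    if (Fields[i].Align > MaxAlign)
      MaxAlign = Fields[i].Align;                  // struct alignment = max member alignment
  }
  Size = roundUp(Size, MaxAlign);                  // tail padding so arrays stay aligned
  std::printf("size %llu, align %u\n", Size, MaxAlign);
  return 0;
}

With these inputs the offsets come out as 0, 4 and 8, matching what MemberOffsets would hold, and the final size is 12 with alignment 4.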
+//===----------------------------------------------------------------------===// +// DataLayout Class Implementation +//===----------------------------------------------------------------------===// + +/// getInt - Get an integer ignoring errors. +static int getInt(StringRef R) { + int Result = 0; + R.getAsInteger(10, Result); + return Result; +} + +void DataLayout::init() { + initializeDataLayoutPass(*PassRegistry::getPassRegistry()); + + LayoutMap = 0; + LittleEndian = false; + StackNaturalAlign = 0; + + // Default alignments + setAlignment(INTEGER_ALIGN, 1, 1, 1); // i1 + setAlignment(INTEGER_ALIGN, 1, 1, 8); // i8 + setAlignment(INTEGER_ALIGN, 2, 2, 16); // i16 + setAlignment(INTEGER_ALIGN, 4, 4, 32); // i32 + setAlignment(INTEGER_ALIGN, 4, 8, 64); // i64 + setAlignment(FLOAT_ALIGN, 2, 2, 16); // half + setAlignment(FLOAT_ALIGN, 4, 4, 32); // float + setAlignment(FLOAT_ALIGN, 8, 8, 64); // double + setAlignment(FLOAT_ALIGN, 16, 16, 128); // ppcf128, quad, ... + setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ... + setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ... + setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct + setPointerAlignment(0, 8, 8, 8); +} + +std::string DataLayout::parseSpecifier(StringRef Desc, DataLayout *td) { + + if (td) + td->init(); + + while (!Desc.empty()) { + std::pair<StringRef, StringRef> Split = Desc.split('-'); + StringRef Token = Split.first; + Desc = Split.second; + + if (Token.empty()) + continue; + + Split = Token.split(':'); + StringRef Specifier = Split.first; + Token = Split.second; + + assert(!Specifier.empty() && "Can't be empty here"); + + switch (Specifier[0]) { + case 'E': + if (td) + td->LittleEndian = false; + break; + case 'e': + if (td) + td->LittleEndian = true; + break; + case 'p': { + int AddrSpace = 0; + if (Specifier.size() > 1) { + AddrSpace = getInt(Specifier.substr(1)); + if (AddrSpace < 0 || AddrSpace > (1 << 24)) + return "Invalid address space, must be a positive 24bit integer"; + } + Split = Token.split(':'); + int PointerMemSizeBits = getInt(Split.first); + if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0) + return "invalid pointer size, must be a positive 8-bit multiple"; + + // Pointer ABI alignment. + Split = Split.second.split(':'); + int PointerABIAlignBits = getInt(Split.first); + if (PointerABIAlignBits < 0 || PointerABIAlignBits % 8 != 0) { + return "invalid pointer ABI alignment, " + "must be a positive 8-bit multiple"; + } + + // Pointer preferred alignment. 
+ Split = Split.second.split(':'); + int PointerPrefAlignBits = getInt(Split.first); + if (PointerPrefAlignBits < 0 || PointerPrefAlignBits % 8 != 0) { + return "invalid pointer preferred alignment, " + "must be a positive 8-bit multiple"; + } + + if (PointerPrefAlignBits == 0) + PointerPrefAlignBits = PointerABIAlignBits; + if (td) + td->setPointerAlignment(AddrSpace, PointerABIAlignBits/8, + PointerPrefAlignBits/8, PointerMemSizeBits/8); + break; + } + case 'i': + case 'v': + case 'f': + case 'a': + case 's': { + AlignTypeEnum AlignType; + char field = Specifier[0]; + switch (field) { + default: + case 'i': AlignType = INTEGER_ALIGN; break; + case 'v': AlignType = VECTOR_ALIGN; break; + case 'f': AlignType = FLOAT_ALIGN; break; + case 'a': AlignType = AGGREGATE_ALIGN; break; + case 's': AlignType = STACK_ALIGN; break; + } + int Size = getInt(Specifier.substr(1)); + if (Size < 0) { + return std::string("invalid ") + field + "-size field, " + "must be positive"; + } + + Split = Token.split(':'); + int ABIAlignBits = getInt(Split.first); + if (ABIAlignBits < 0 || ABIAlignBits % 8 != 0) { + return std::string("invalid ") + field +"-abi-alignment field, " + "must be a positive 8-bit multiple"; + } + unsigned ABIAlign = ABIAlignBits / 8; + + Split = Split.second.split(':'); + + int PrefAlignBits = getInt(Split.first); + if (PrefAlignBits < 0 || PrefAlignBits % 8 != 0) { + return std::string("invalid ") + field +"-preferred-alignment field, " + "must be a positive 8-bit multiple"; + } + unsigned PrefAlign = PrefAlignBits / 8; + if (PrefAlign == 0) + PrefAlign = ABIAlign; + + if (td) + td->setAlignment(AlignType, ABIAlign, PrefAlign, Size); + break; + } + case 'n': // Native integer types. + Specifier = Specifier.substr(1); + do { + int Width = getInt(Specifier); + if (Width <= 0) { + return std::string("invalid native integer size \'") + + Specifier.str() + "\', must be a positive integer."; + } + if (td && Width != 0) + td->LegalIntWidths.push_back(Width); + Split = Token.split(':'); + Specifier = Split.first; + Token = Split.second; + } while (!Specifier.empty() || !Token.empty()); + break; + case 'S': { // Stack natural alignment. + int StackNaturalAlignBits = getInt(Specifier.substr(1)); + if (StackNaturalAlignBits < 0 || StackNaturalAlignBits % 8 != 0) { + return "invalid natural stack alignment (S-field), " + "must be a positive 8-bit multiple"; + } + if (td) + td->StackNaturalAlign = StackNaturalAlignBits / 8; + break; + } + default: + break; + } + } + + return ""; +} + +/// Default ctor. +/// +/// @note This has to exist, because this is a pass, but it should never be +/// used. +DataLayout::DataLayout() : ImmutablePass(ID) { + report_fatal_error("Bad DataLayout ctor used. " + "Tool did not specify a DataLayout to use?"); +} + +DataLayout::DataLayout(const Module *M) + : ImmutablePass(ID) { + std::string errMsg = parseSpecifier(M->getDataLayout(), this); + assert(errMsg == "" && "Module M has malformed data layout string."); + (void)errMsg; +} + +void +DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + assert(pref_align < (1 << 16) && "Alignment doesn't fit in bitfield"); + assert(bit_width < (1 << 24) && "Bit width doesn't fit in bitfield"); + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + if (Alignments[i].AlignType == (unsigned)align_type && + Alignments[i].TypeBitWidth == bit_width) { + // Update the abi, preferred alignments. 
+ Alignments[i].ABIAlign = abi_align; + Alignments[i].PrefAlign = pref_align; + return; + } + } + + Alignments.push_back(LayoutAlignElem::get(align_type, abi_align, + pref_align, bit_width)); +} + +void +DataLayout::setPointerAlignment(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + DenseMap<unsigned,PointerAlignElem>::iterator val = Pointers.find(addr_space); + if (val == Pointers.end()) { + Pointers[addr_space] = PointerAlignElem::get(addr_space, + abi_align, pref_align, bit_width); + } else { + val->second.ABIAlign = abi_align; + val->second.PrefAlign = pref_align; + val->second.TypeBitWidth = bit_width; + } +} + +/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or +/// preferred if ABIInfo = false) the layout wants for the specified datatype. +unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, + uint32_t BitWidth, bool ABIInfo, + Type *Ty) const { + // Check to see if we have an exact match and remember the best match we see. + int BestMatchIdx = -1; + int LargestInt = -1; + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + if (Alignments[i].AlignType == (unsigned)AlignType && + Alignments[i].TypeBitWidth == BitWidth) + return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign; + + // The best match so far depends on what we're looking for. + if (AlignType == INTEGER_ALIGN && + Alignments[i].AlignType == INTEGER_ALIGN) { + // The "best match" for integers is the smallest size that is larger than + // the BitWidth requested. + if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 || + Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth)) + BestMatchIdx = i; + // However, if there isn't one that's larger, then we must use the + // largest one we have (see below) + if (LargestInt == -1 || + Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth) + LargestInt = i; + } + } + + // Okay, we didn't find an exact solution. Fall back here depending on what + // is being looked for. + if (BestMatchIdx == -1) { + // If we didn't find an integer alignment, fall back on most conservative. + if (AlignType == INTEGER_ALIGN) { + BestMatchIdx = LargestInt; + } else { + assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!"); + + // By default, use natural alignment for vector types. This is consistent + // with what clang and llvm-gcc do. + unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType()); + Align *= cast<VectorType>(Ty)->getNumElements(); + // If the alignment is not a power of 2, round up to the next power of 2. + // This happens for non-power-of-2 length vectors. + if (Align & (Align-1)) + Align = NextPowerOf2(Align); + return Align; + } + } + + // Since we got a "best match" index, just return it. + return ABIInfo ? Alignments[BestMatchIdx].ABIAlign + : Alignments[BestMatchIdx].PrefAlign; +} + +namespace { + +class StructLayoutMap { + typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy; + LayoutInfoTy LayoutInfo; + +public: + virtual ~StructLayoutMap() { + // Remove any layouts. + for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end(); + I != E; ++I) { + StructLayout *Value = I->second; + Value->~StructLayout(); + free(Value); + } + } + + StructLayout *&operator[](StructType *STy) { + return LayoutInfo[STy]; + } + + // for debugging... 
+ virtual void dump() const {} +}; + +} // end anonymous namespace + +DataLayout::~DataLayout() { + delete static_cast<StructLayoutMap*>(LayoutMap); +} + +const StructLayout *DataLayout::getStructLayout(StructType *Ty) const { + if (!LayoutMap) + LayoutMap = new StructLayoutMap(); + + StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap); + StructLayout *&SL = (*STM)[Ty]; + if (SL) return SL; + + // Otherwise, create the struct layout. Because it is variable length, we + // malloc it, then use placement new. + int NumElts = Ty->getNumElements(); + StructLayout *L = + (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t)); + + // Set SL before calling StructLayout's ctor. The ctor could cause other + // entries to be added to TheMap, invalidating our reference. + SL = L; + + new (L) StructLayout(Ty, *this); + + return L; +} + +std::string DataLayout::getStringRepresentation() const { + std::string Result; + raw_string_ostream OS(Result); + + OS << (LittleEndian ? "e" : "E"); + SmallVector<unsigned, 8> addrSpaces; + // Lets get all of the known address spaces and sort them + // into increasing order so that we can emit the string + // in a cleaner format. + for (DenseMap<unsigned, PointerAlignElem>::const_iterator + pib = Pointers.begin(), pie = Pointers.end(); + pib != pie; ++pib) { + addrSpaces.push_back(pib->first); + } + std::sort(addrSpaces.begin(), addrSpaces.end()); + for (SmallVector<unsigned, 8>::iterator asb = addrSpaces.begin(), + ase = addrSpaces.end(); asb != ase; ++asb) { + const PointerAlignElem &PI = Pointers.find(*asb)->second; + OS << "-p"; + if (PI.AddressSpace) { + OS << PI.AddressSpace; + } + OS << ":" << PI.TypeBitWidth*8 << ':' << PI.ABIAlign*8 + << ':' << PI.PrefAlign*8; + } + OS << "-S" << StackNaturalAlign*8; + + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + const LayoutAlignElem &AI = Alignments[i]; + OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':' + << AI.ABIAlign*8 << ':' << AI.PrefAlign*8; + } + + if (!LegalIntWidths.empty()) { + OS << "-n" << (unsigned)LegalIntWidths[0]; + + for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i) + OS << ':' << (unsigned)LegalIntWidths[i]; + } + return OS.str(); +} + + +uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const { + assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); + switch (Ty->getTypeID()) { + case Type::LabelTyID: + return getPointerSizeInBits(0); + case Type::PointerTyID: { + unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace(); + return getPointerSizeInBits(AS); + } + case Type::ArrayTyID: { + ArrayType *ATy = cast<ArrayType>(Ty); + return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements(); + } + case Type::StructTyID: + // Get the layout annotation... which is lazily created on demand. + return getStructLayout(cast<StructType>(Ty))->getSizeInBits(); + case Type::IntegerTyID: + return cast<IntegerType>(Ty)->getBitWidth(); + case Type::VoidTyID: + return 8; + case Type::HalfTyID: + return 16; + case Type::FloatTyID: + return 32; + case Type::DoubleTyID: + case Type::X86_MMXTyID: + return 64; + case Type::PPC_FP128TyID: + case Type::FP128TyID: + return 128; + // In memory objects this is always aligned to a higher boundary, but + // only 80 bits contain information. 
+ case Type::X86_FP80TyID: + return 80; + case Type::VectorTyID: { + VectorType *VTy = cast<VectorType>(Ty); + return VTy->getNumElements()*getTypeSizeInBits(VTy->getElementType()); + } + default: + llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type"); + } +} + +/*! + \param abi_or_pref Flag that determines which alignment is returned. true + returns the ABI alignment, false returns the preferred alignment. + \param Ty The underlying type for which alignment is determined. + + Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref + == false) for the requested type \a Ty. + */ +unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const { + int AlignType = -1; + + assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); + switch (Ty->getTypeID()) { + // Early escape for the non-numeric types. + case Type::LabelTyID: + return (abi_or_pref + ? getPointerABIAlignment(0) + : getPointerPrefAlignment(0)); + case Type::PointerTyID: { + unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace(); + return (abi_or_pref + ? getPointerABIAlignment(AS) + : getPointerPrefAlignment(AS)); + } + case Type::ArrayTyID: + return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref); + + case Type::StructTyID: { + // Packed structure types always have an ABI alignment of one. + if (cast<StructType>(Ty)->isPacked() && abi_or_pref) + return 1; + + // Get the layout annotation... which is lazily created on demand. + const StructLayout *Layout = getStructLayout(cast<StructType>(Ty)); + unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty); + return std::max(Align, Layout->getAlignment()); + } + case Type::IntegerTyID: + case Type::VoidTyID: + AlignType = INTEGER_ALIGN; + break; + case Type::HalfTyID: + case Type::FloatTyID: + case Type::DoubleTyID: + // PPC_FP128TyID and FP128TyID have different data contents, but the + // same size and alignment, so they look the same here. + case Type::PPC_FP128TyID: + case Type::FP128TyID: + case Type::X86_FP80TyID: + AlignType = FLOAT_ALIGN; + break; + case Type::X86_MMXTyID: + case Type::VectorTyID: + AlignType = VECTOR_ALIGN; + break; + default: + llvm_unreachable("Bad type for getAlignment!!!"); + } + + return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty), + abi_or_pref, Ty); +} + +unsigned DataLayout::getABITypeAlignment(Type *Ty) const { + return getAlignment(Ty, true); +} + +/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for +/// an integer type of the specified bitwidth. +unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const { + return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0); +} + + +unsigned DataLayout::getCallFrameTypeAlignment(Type *Ty) const { + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) + if (Alignments[i].AlignType == STACK_ALIGN) + return Alignments[i].ABIAlign; + + return getABITypeAlignment(Ty); +} + +unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const { + return getAlignment(Ty, false); +} + +unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const { + unsigned Align = getPrefTypeAlignment(Ty); + assert(!(Align & (Align-1)) && "Alignment is not a power of two!"); + return Log2_32(Align); +} + +/// getIntPtrType - Return an integer type with size at least as big as that +/// of a pointer in the given address space. 
+IntegerType *DataLayout::getIntPtrType(LLVMContext &C, + unsigned AddressSpace) const { + return IntegerType::get(C, getPointerSizeInBits(AddressSpace)); +} + +/// getIntPtrType - Return an integer (vector of integer) type with size at +/// least as big as that of a pointer of the given pointer (vector of pointer) +/// type. +Type *DataLayout::getIntPtrType(Type *Ty) const { + assert(Ty->isPtrOrPtrVectorTy() && + "Expected a pointer or pointer vector type."); + unsigned NumBits = getTypeSizeInBits(Ty->getScalarType()); + IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits); + if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) + return VectorType::get(IntTy, VecTy->getNumElements()); + return IntTy; +} + +uint64_t DataLayout::getIndexedOffset(Type *ptrTy, + ArrayRef<Value *> Indices) const { + Type *Ty = ptrTy; + assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()"); + uint64_t Result = 0; + + generic_gep_type_iterator<Value* const*> + TI = gep_type_begin(ptrTy, Indices); + for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX; + ++CurIDX, ++TI) { + if (StructType *STy = dyn_cast<StructType>(*TI)) { + assert(Indices[CurIDX]->getType() == + Type::getInt32Ty(ptrTy->getContext()) && + "Illegal struct idx"); + unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue(); + + // Get structure layout information... + const StructLayout *Layout = getStructLayout(STy); + + // Add in the offset, as calculated by the structure layout info... + Result += Layout->getElementOffset(FieldNo); + + // Update Ty to refer to current element + Ty = STy->getElementType(FieldNo); + } else { + // Update Ty to refer to current element + Ty = cast<SequentialType>(Ty)->getElementType(); + + // Get the array index and the size of each array element. + if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue()) + Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty); + } + } + + return Result; +} + +/// getPreferredAlignment - Return the preferred alignment of the specified +/// global. This includes an explicitly requested alignment (if the global +/// has one). +unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const { + Type *ElemType = GV->getType()->getElementType(); + unsigned Alignment = getPrefTypeAlignment(ElemType); + unsigned GVAlignment = GV->getAlignment(); + if (GVAlignment >= Alignment) { + Alignment = GVAlignment; + } else if (GVAlignment != 0) { + Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType)); + } + + if (GV->hasInitializer() && GVAlignment == 0) { + if (Alignment < 16) { + // If the global is not external, see if it is large. If so, give it a + // larger alignment. + if (getTypeSizeInBits(ElemType) > 128) + Alignment = 16; // 16-byte alignment. + } + } + return Alignment; +} + +/// getPreferredAlignmentLog - Return the preferred alignment of the +/// specified global, returned in log form. This includes an explicitly +/// requested alignment (if the global has one). 
+unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const { + return Log2_32(getPreferredAlignment(GV)); +} diff --git a/lib/VMCore/DebugInfo.cpp b/lib/VMCore/DebugInfo.cpp index c8f8f7d..3029ce2 100644 --- a/lib/VMCore/DebugInfo.cpp +++ b/lib/VMCore/DebugInfo.cpp @@ -111,6 +111,16 @@ Function *DIDescriptor::getFunctionField(unsigned Elt) const { return 0; } +void DIDescriptor::replaceFunctionField(unsigned Elt, Function *F) { + if (DbgNode == 0) + return; + + if (Elt < DbgNode->getNumOperands()) { + MDNode *Node = const_cast<MDNode*>(DbgNode); + Node->replaceOperandWith(Elt, F); + } +} + unsigned DIVariable::getNumAddrElements() const { if (getVersion() <= LLVMDebugVersion8) return DbgNode->getNumOperands()-6; diff --git a/lib/VMCore/Dominators.cpp b/lib/VMCore/Dominators.cpp index 60bdeac..77b2403 100644 --- a/lib/VMCore/Dominators.cpp +++ b/lib/VMCore/Dominators.cpp @@ -161,6 +161,11 @@ bool DominatorTree::dominates(const Instruction *Def, bool DominatorTree::dominates(const BasicBlockEdge &BBE, const BasicBlock *UseBB) const { + // Assert that we have a single edge. We could handle them by simply + // returning false, but since isSingleEdge is linear on the number of + // edges, the callers can normally handle them more efficiently. + assert(BBE.isSingleEdge()); + // If the BB the edge ends in doesn't dominate the use BB, then the // edge also doesn't. const BasicBlock *Start = BBE.getStart(); @@ -207,6 +212,11 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE, bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const { + // Assert that we have a single edge. We could handle them by simply + // returning false, but since isSingleEdge is linear on the number of + // edges, the callers can normally handle them more efficiently. + assert(BBE.isSingleEdge()); + Instruction *UserInst = cast<Instruction>(U.getUser()); // A PHI in the end of the edge is dominated by it. PHINode *PN = dyn_cast<PHINode>(UserInst); diff --git a/lib/VMCore/Function.cpp b/lib/VMCore/Function.cpp index 2e0b316..9c4f2d9 100644 --- a/lib/VMCore/Function.cpp +++ b/lib/VMCore/Function.cpp @@ -78,7 +78,8 @@ unsigned Argument::getArgNo() const { /// in its containing function. bool Argument::hasByValAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::ByVal); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::ByVal); } unsigned Argument::getParamAlignment() const { @@ -91,21 +92,24 @@ unsigned Argument::getParamAlignment() const { /// it in its containing function. bool Argument::hasNestAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::Nest); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::Nest); } /// hasNoAliasAttr - Return true if this argument has the noalias attribute on /// it in its containing function. bool Argument::hasNoAliasAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::NoAlias); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::NoAlias); } /// hasNoCaptureAttr - Return true if this argument has the nocapture attribute /// on it in its containing function. 
bool Argument::hasNoCaptureAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::NoCapture); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::NoCapture); } /// hasSRetAttr - Return true if this argument has the sret attribute on @@ -114,7 +118,8 @@ bool Argument::hasStructRetAttr() const { if (!getType()->isPointerTy()) return false; if (this != getParent()->arg_begin()) return false; // StructRet param must be first param - return getParent()->paramHasAttr(1, Attribute::StructRet); + return getParent()->getParamAttributes(1). + hasAttribute(Attributes::StructRet); } /// addAttr - Add a Attribute to an argument @@ -180,7 +185,7 @@ Function::Function(FunctionType *Ty, LinkageTypes Linkage, // Ensure intrinsics have the right parameter attributes. if (unsigned IID = getIntrinsicID()) - setAttributes(Intrinsic::getAttributes(Intrinsic::ID(IID))); + setAttributes(Intrinsic::getAttributes(getContext(), Intrinsic::ID(IID))); } @@ -244,13 +249,13 @@ void Function::dropAllReferences() { void Function::addAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.addAttr(i, attr); + PAL = PAL.addAttr(getContext(), i, attr); setAttributes(PAL); } void Function::removeAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.removeAttr(i, attr); + PAL = PAL.removeAttr(getContext(), i, attr); setAttributes(PAL); } diff --git a/lib/VMCore/GCOV.cpp b/lib/VMCore/GCOV.cpp index 003a5d4..ea2f0a6 100644 --- a/lib/VMCore/GCOV.cpp +++ b/lib/VMCore/GCOV.cpp @@ -28,19 +28,19 @@ GCOVFile::~GCOVFile() { } /// isGCDAFile - Return true if Format identifies a .gcda file. -static bool isGCDAFile(GCOVFormat Format) { - return Format == GCDA_402 || Format == GCDA_404; +static bool isGCDAFile(GCOV::GCOVFormat Format) { + return Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404; } /// isGCNOFile - Return true if Format identifies a .gcno file. -static bool isGCNOFile(GCOVFormat Format) { - return Format == GCNO_402 || Format == GCNO_404; +static bool isGCNOFile(GCOV::GCOVFormat Format) { + return Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404; } /// read - Read GCOV buffer. bool GCOVFile::read(GCOVBuffer &Buffer) { - GCOVFormat Format = Buffer.readGCOVFormat(); - if (Format == InvalidGCOV) + GCOV::GCOVFormat Format = Buffer.readGCOVFormat(); + if (Format == GCOV::InvalidGCOV) return false; unsigned i = 0; @@ -48,7 +48,7 @@ bool GCOVFile::read(GCOVBuffer &Buffer) { GCOVFunction *GFun = NULL; if (isGCDAFile(Format)) { // Use existing function while reading .gcda file. - assert (i < Functions.size() && ".gcda data does not match .gcno data"); + assert(i < Functions.size() && ".gcda data does not match .gcno data"); GFun = Functions[i]; } else if (isGCNOFile(Format)){ GFun = new GCOVFunction(); @@ -87,21 +87,21 @@ GCOVFunction::~GCOVFunction() { /// read - Read a aunction from the buffer. Return false if buffer cursor /// does not point to a function tag. 
-bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { +bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) { if (!Buff.readFunctionTag()) return false; Buff.readInt(); // Function header length Ident = Buff.readInt(); Buff.readInt(); // Checksum #1 - if (Format != GCNO_402) + if (Format != GCOV::GCNO_402) Buff.readInt(); // Checksum #2 Name = Buff.readString(); - if (Format == GCNO_402 || Format == GCNO_404) + if (Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404) Filename = Buff.readString(); - if (Format == GCDA_402 || Format == GCDA_404) { + if (Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404) { Buff.readArcTag(); uint32_t Count = Buff.readInt() / 2; for (unsigned i = 0, e = Count; i != e; ++i) { @@ -113,7 +113,9 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { LineNumber = Buff.readInt(); // read blocks. - assert (Buff.readBlockTag() && "Block Tag not found!"); + bool BlockTagFound = Buff.readBlockTag(); + (void)BlockTagFound; + assert(BlockTagFound && "Block Tag not found!"); uint32_t BlockCount = Buff.readInt(); for (int i = 0, e = BlockCount; i != e; ++i) { Buff.readInt(); // Block flags; @@ -124,7 +126,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { while (Buff.readEdgeTag()) { uint32_t EdgeCount = (Buff.readInt() - 1) / 2; uint32_t BlockNo = Buff.readInt(); - assert (BlockNo < BlockCount && "Unexpected Block number!"); + assert(BlockNo < BlockCount && "Unexpected Block number!"); for (int i = 0, e = EdgeCount; i != e; ++i) { Blocks[BlockNo]->addEdge(Buff.readInt()); Buff.readInt(); // Edge flag @@ -136,7 +138,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { uint32_t LineTableLength = Buff.readInt(); uint32_t Size = Buff.getCursor() + LineTableLength*4; uint32_t BlockNo = Buff.readInt(); - assert (BlockNo < BlockCount && "Unexpected Block number!"); + assert(BlockNo < BlockCount && "Unexpected Block number!"); GCOVBlock *Block = Blocks[BlockNo]; Buff.readInt(); // flag while (Buff.getCursor() != (Size - 4)) { diff --git a/lib/VMCore/IRBuilder.cpp b/lib/VMCore/IRBuilder.cpp index 5c4e6d9..04f08fe 100644 --- a/lib/VMCore/IRBuilder.cpp +++ b/lib/VMCore/IRBuilder.cpp @@ -80,7 +80,7 @@ CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align, CallInst *IRBuilderBase:: CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align, - bool isVolatile, MDNode *TBAATag) { + bool isVolatile, MDNode *TBAATag, MDNode *TBAAStructTag) { Dst = getCastedInt8PtrValue(Dst); Src = getCastedInt8PtrValue(Src); @@ -94,6 +94,10 @@ CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align, // Set the TBAA info if present. if (TBAATag) CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); + + // Set the TBAA Struct info if present. 
+ if (TBAAStructTag) + CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag); return CI; } diff --git a/lib/VMCore/InlineAsm.cpp b/lib/VMCore/InlineAsm.cpp index 736e370..2e636aa 100644 --- a/lib/VMCore/InlineAsm.cpp +++ b/lib/VMCore/InlineAsm.cpp @@ -27,19 +27,20 @@ InlineAsm::~InlineAsm() { InlineAsm *InlineAsm::get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, - bool isAlignStack) { - InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack); + bool isAlignStack, AsmDialect asmDialect) { + InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack, + asmDialect); LLVMContextImpl *pImpl = Ty->getContext().pImpl; return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(Ty), Key); } InlineAsm::InlineAsm(PointerType *Ty, const std::string &asmString, const std::string &constraints, bool hasSideEffects, - bool isAlignStack) + bool isAlignStack, AsmDialect asmDialect) : Value(Ty, Value::InlineAsmVal), - AsmString(asmString), - Constraints(constraints), HasSideEffects(hasSideEffects), - IsAlignStack(isAlignStack) { + AsmString(asmString), Constraints(constraints), + HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack), + Dialect(asmDialect) { // Do various checks on the constraint string and type. assert(Verify(getFunctionType(), constraints) && diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp index 9af98e8..94bd2a1 100644 --- a/lib/VMCore/Instructions.cpp +++ b/lib/VMCore/Instructions.cpp @@ -332,21 +332,30 @@ CallInst::CallInst(const CallInst &CI) void CallInst::addAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.addAttr(i, attr); + PAL = PAL.addAttr(getContext(), i, attr); setAttributes(PAL); } void CallInst::removeAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.removeAttr(i, attr); + PAL = PAL.removeAttr(getContext(), i, attr); setAttributes(PAL); } -bool CallInst::paramHasAttr(unsigned i, Attributes attr) const { - if (AttributeList.paramHasAttr(i, attr)) +bool CallInst::hasFnAttr(Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(AttrListPtr::FunctionIndex) + .hasAttribute(A)) return true; if (const Function *F = getCalledFunction()) - return F->paramHasAttr(i, attr); + return F->getParamAttributes(AttrListPtr::FunctionIndex).hasAttribute(A); + return false; +} + +bool CallInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(i).hasAttribute(A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getParamAttributes(i).hasAttribute(A); return false; } @@ -562,23 +571,32 @@ void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) { return setSuccessor(idx, B); } -bool InvokeInst::paramHasAttr(unsigned i, Attributes attr) const { - if (AttributeList.paramHasAttr(i, attr)) +bool InvokeInst::hasFnAttr(Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(AttrListPtr::FunctionIndex). 
+ hasAttribute(A)) return true; if (const Function *F = getCalledFunction()) - return F->paramHasAttr(i, attr); + return F->getParamAttributes(AttrListPtr::FunctionIndex).hasAttribute(A); + return false; +} + +bool InvokeInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(i).hasAttribute(A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getParamAttributes(i).hasAttribute(A); return false; } void InvokeInst::addAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.addAttr(i, attr); + PAL = PAL.addAttr(getContext(), i, attr); setAttributes(PAL); } void InvokeInst::removeAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.removeAttr(i, attr); + PAL = PAL.removeAttr(getContext(), i, attr); setAttributes(PAL); } @@ -1381,18 +1399,6 @@ Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList) { return getIndexedTypeInternal(Ptr, IdxList); } -unsigned GetElementPtrInst::getAddressSpace(Value *Ptr) { - Type *Ty = Ptr->getType(); - - if (VectorType *VTy = dyn_cast<VectorType>(Ty)) - Ty = VTy->getElementType(); - - if (PointerType *PTy = dyn_cast<PointerType>(Ty)) - return PTy->getAddressSpace(); - - llvm_unreachable("Invalid GEP pointer type"); -} - /// hasAllZeroIndices - Return true if all of the indices of this GEP are /// zeros. If so, the result pointer and the first operand have the same /// value, just potentially different types. @@ -2112,7 +2118,8 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const { /// If no such cast is permited, the function returns 0. unsigned CastInst::isEliminableCastPair( Instruction::CastOps firstOp, Instruction::CastOps secondOp, - Type *SrcTy, Type *MidTy, Type *DstTy, Type *IntPtrTy) { + Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, + Type *DstIntPtrTy) { // Define the 144 possibilities for these two cast instructions. The values // in this matrix determine what to do in a given situation and select the // case in the switch below. 
The rows correspond to firstOp, the columns @@ -2215,9 +2222,9 @@ unsigned CastInst::isEliminableCastPair( return 0; case 7: { // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size - if (!IntPtrTy) + if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy) return 0; - unsigned PtrSize = IntPtrTy->getScalarSizeInBits(); + unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits(); unsigned MidSize = MidTy->getScalarSizeInBits(); if (MidSize >= PtrSize) return Instruction::BitCast; @@ -2256,9 +2263,9 @@ unsigned CastInst::isEliminableCastPair( return 0; case 13: { // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize - if (!IntPtrTy) + if (!MidIntPtrTy) return 0; - unsigned PtrSize = IntPtrTy->getScalarSizeInBits(); + unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); unsigned SrcSize = SrcTy->getScalarSizeInBits(); unsigned DstSize = DstTy->getScalarSizeInBits(); if (SrcSize <= PtrSize && SrcSize == DstSize) @@ -2836,7 +2843,7 @@ BitCastInst::BitCastInst( // CmpInst Classes //===----------------------------------------------------------------------===// -void CmpInst::Anchor() const {} +void CmpInst::anchor() {} CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate, Value *LHS, Value *RHS, const Twine &Name, diff --git a/lib/VMCore/LLVMContext.cpp b/lib/VMCore/LLVMContext.cpp index f07f0b3..2446ec9 100644 --- a/lib/VMCore/LLVMContext.cpp +++ b/lib/VMCore/LLVMContext.cpp @@ -53,6 +53,11 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { unsigned RangeID = getMDKindID("range"); assert(RangeID == MD_range && "range kind id drifted"); (void)RangeID; + + // Create the 'tbaa.struct' metadata kind. + unsigned TBAAStructID = getMDKindID("tbaa.struct"); + assert(TBAAStructID == MD_tbaa_struct && "tbaa.struct kind id drifted"); + (void)TBAAStructID; } LLVMContext::~LLVMContext() { delete pImpl; } diff --git a/lib/VMCore/LLVMContextImpl.cpp b/lib/VMCore/LLVMContextImpl.cpp index 6279bb8..d35d284 100644 --- a/lib/VMCore/LLVMContextImpl.cpp +++ b/lib/VMCore/LLVMContextImpl.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "LLVMContextImpl.h" +#include "llvm/Attributes.h" #include "llvm/Module.h" #include "llvm/ADT/STLExtras.h" #include <algorithm> @@ -93,7 +94,21 @@ LLVMContextImpl::~LLVMContextImpl() { E = CDSConstants.end(); I != E; ++I) delete I->second; CDSConstants.clear(); - + + // Destroy attributes. + for (FoldingSetIterator<AttributesImpl> I = AttrsSet.begin(), + E = AttrsSet.end(); I != E; ) { + FoldingSetIterator<AttributesImpl> Elem = I++; + delete &*Elem; + } + + // Destroy attribute lists. + for (FoldingSetIterator<AttributeListImpl> I = AttrsLists.begin(), + E = AttrsLists.end(); I != E; ) { + FoldingSetIterator<AttributeListImpl> Elem = I++; + delete &*Elem; + } + // Destroy MDNodes. ~MDNode can move and remove nodes between the MDNodeSet // and the NonUniquedMDNodes sets, so copy the values out first. SmallVector<MDNode*, 8> MDNodes; @@ -107,6 +122,7 @@ LLVMContextImpl::~LLVMContextImpl() { (*I)->destroy(); assert(MDNodeSet.empty() && NonUniquedMDNodes.empty() && "Destroying all MDNodes didn't empty the Context's sets."); + // Destroy MDStrings. 
DeleteContainerSeconds(MDStringCache); } diff --git a/lib/VMCore/LLVMContextImpl.h b/lib/VMCore/LLVMContextImpl.h index 2252028..90cf424 100644 --- a/lib/VMCore/LLVMContextImpl.h +++ b/lib/VMCore/LLVMContextImpl.h @@ -16,6 +16,7 @@ #define LLVM_LLVMCONTEXT_IMPL_H #include "llvm/LLVMContext.h" +#include "AttributesImpl.h" #include "ConstantsContext.h" #include "LeaksContext.h" #include "llvm/Constants.h" @@ -253,10 +254,14 @@ public: typedef DenseMap<DenseMapAPFloatKeyInfo::KeyTy, ConstantFP*, DenseMapAPFloatKeyInfo> FPMapTy; FPMapTy FPConstants; - + + FoldingSet<AttributesImpl> AttrsSet; + FoldingSet<AttributeListImpl> AttrsLists; + StringMap<Value*> MDStringCache; - + FoldingSet<MDNode> MDNodeSet; + // MDNodes may be uniqued or not uniqued. When they're not uniqued, they // aren't in the MDNodeSet, but they're still shared between objects, so no // one object can destroy them. This set allows us to at least destroy them diff --git a/lib/VMCore/Makefile b/lib/VMCore/Makefile index 2b9b0f2..8b98651 100644 --- a/lib/VMCore/Makefile +++ b/lib/VMCore/Makefile @@ -9,7 +9,6 @@ LEVEL = ../.. LIBRARYNAME = LLVMCore BUILD_ARCHIVE = 1 -REQUIRES_RTTI = 1 BUILT_SOURCES = $(PROJ_OBJ_ROOT)/include/llvm/Intrinsics.gen diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp index 4530c04..53f1149 100644 --- a/lib/VMCore/PassManager.cpp +++ b/lib/VMCore/PassManager.cpp @@ -1189,7 +1189,7 @@ void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P, assert(PassDebugging >= Details); if (Set.empty()) return; - dbgs() << (void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:"; + dbgs() << (const void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:"; for (unsigned i = 0; i != Set.size(); ++i) { if (i) dbgs() << ','; const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(Set[i]); diff --git a/lib/VMCore/TargetTransformInfo.cpp b/lib/VMCore/TargetTransformInfo.cpp new file mode 100644 index 0000000..e91c29c --- /dev/null +++ b/lib/VMCore/TargetTransformInfo.cpp @@ -0,0 +1,31 @@ +//===- llvm/VMCore/TargetTransformInfo.cpp ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/TargetTransformInfo.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +/// Default ctor. +/// +/// @note This has to exist, because this is a pass, but it should never be +/// used. +TargetTransformInfo::TargetTransformInfo() : ImmutablePass(ID) { + /// You are seeing this error because your pass required the TTI + /// using a call to "getAnalysis<TargetTransformInfo>()", and you did + /// not initialize a machine target which can provide the TTI. + /// You should use "getAnalysisIfAvailable<TargetTransformInfo>()" instead. + report_fatal_error("Bad TargetTransformInfo ctor used. 
" + "Tool did not specify a TargetTransformInfo to use?"); +} + +INITIALIZE_PASS(TargetTransformInfo, "targettransforminfo", + "Target Transform Info", false, true) +char TargetTransformInfo::ID = 0; + diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp index 5e9a00f..1656ab2 100644 --- a/lib/VMCore/Type.cpp +++ b/lib/VMCore/Type.cpp @@ -47,35 +47,17 @@ Type *Type::getScalarType() { return this; } +const Type *Type::getScalarType() const { + if (const VectorType *VTy = dyn_cast<VectorType>(this)) + return VTy->getElementType(); + return this; +} + /// isIntegerTy - Return true if this is an IntegerType of the specified width. bool Type::isIntegerTy(unsigned Bitwidth) const { return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth; } -/// isIntOrIntVectorTy - Return true if this is an integer type or a vector of -/// integer types. -/// -bool Type::isIntOrIntVectorTy() const { - if (isIntegerTy()) - return true; - if (getTypeID() != Type::VectorTyID) return false; - - return cast<VectorType>(this)->getElementType()->isIntegerTy(); -} - -/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP types. -/// -bool Type::isFPOrFPVectorTy() const { - if (getTypeID() == Type::HalfTyID || getTypeID() == Type::FloatTyID || - getTypeID() == Type::DoubleTyID || - getTypeID() == Type::FP128TyID || getTypeID() == Type::X86_FP80TyID || - getTypeID() == Type::PPC_FP128TyID) - return true; - if (getTypeID() != Type::VectorTyID) return false; - - return cast<VectorType>(this)->getElementType()->isFloatingPointTy(); -} - // canLosslesslyBitCastTo - Return true if this type can be converted to // 'Ty' without any reinterpretation of bits. For example, i8* to i32*. // @@ -220,8 +202,6 @@ Type *Type::getStructElementType(unsigned N) const { return cast<StructType>(this)->getElementType(N); } - - Type *Type::getSequentialElementType() const { return cast<SequentialType>(this)->getElementType(); } @@ -235,12 +215,10 @@ unsigned Type::getVectorNumElements() const { } unsigned Type::getPointerAddressSpace() const { - return cast<PointerType>(this)->getAddressSpace(); + return cast<PointerType>(getScalarType())->getAddressSpace(); } - - //===----------------------------------------------------------------------===// // Primitive 'Type' data //===----------------------------------------------------------------------===// @@ -400,12 +378,10 @@ FunctionType *FunctionType::get(Type *ReturnType, return FT; } - FunctionType *FunctionType::get(Type *Result, bool isVarArg) { return get(Result, ArrayRef<Type *>(), isVarArg); } - /// isValidReturnType - Return true if the specified type is valid as a return /// type. bool FunctionType::isValidReturnType(Type *RetTy) { @@ -553,7 +529,6 @@ StructType *StructType::create(LLVMContext &Context) { return create(Context, StringRef()); } - StructType *StructType::create(ArrayRef<Type*> Elements, StringRef Name, bool isPacked) { assert(!Elements.empty() && @@ -637,7 +612,6 @@ bool StructType::isLayoutIdentical(StructType *Other) const { return std::equal(element_begin(), element_end(), Other->element_begin()); } - /// getTypeByName - Return the type with the specified name, or null if there /// is none by that name. 
StructType *Module::getTypeByName(StringRef Name) const { @@ -700,7 +674,6 @@ ArrayType::ArrayType(Type *ElType, uint64_t NumEl) NumElements = NumEl; } - ArrayType *ArrayType::get(Type *elementType, uint64_t NumElements) { Type *ElementType = const_cast<Type*>(elementType); assert(isValidElementType(ElementType) && "Invalid type for array element!"); diff --git a/lib/VMCore/User.cpp b/lib/VMCore/User.cpp index 5f35ce4..e847ce6 100644 --- a/lib/VMCore/User.cpp +++ b/lib/VMCore/User.cpp @@ -10,6 +10,7 @@ #include "llvm/Constant.h" #include "llvm/GlobalValue.h" #include "llvm/User.h" +#include "llvm/Operator.h" namespace llvm { @@ -78,4 +79,12 @@ void User::operator delete(void *Usr) { ::operator delete(Storage); } +//===----------------------------------------------------------------------===// +// Operator Class +//===----------------------------------------------------------------------===// + +Operator::~Operator() { + llvm_unreachable("should never destroy an Operator"); +} + } // End llvm namespace diff --git a/lib/VMCore/Value.cpp b/lib/VMCore/Value.cpp index d871108..8d0720d 100644 --- a/lib/VMCore/Value.cpp +++ b/lib/VMCore/Value.cpp @@ -394,7 +394,7 @@ static bool isDereferenceablePointer(const Value *V, // It's also not always safe to follow a bitcast, for example: // bitcast i8* (alloca i8) to i32* // would result in a 4-byte load from a 1-byte alloca. Some cases could - // be handled using TargetData to check sizes and alignments though. + // be handled using DataLayout to check sizes and alignments though. // These are obviously ok. if (isa<AllocaInst>(V)) return true; diff --git a/lib/VMCore/ValueTypes.cpp b/lib/VMCore/ValueTypes.cpp index d1ca953..2ee9f0f 100644 --- a/lib/VMCore/ValueTypes.cpp +++ b/lib/VMCore/ValueTypes.cpp @@ -55,24 +55,32 @@ bool EVT::isExtendedVector() const { return LLVMTy->isVectorTy(); } +bool EVT::isExtended16BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 16; +} + +bool EVT::isExtended32BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 32; +} + bool EVT::isExtended64BitVector() const { - return isExtendedVector() && getSizeInBits() == 64; + return isExtendedVector() && getExtendedSizeInBits() == 64; } bool EVT::isExtended128BitVector() const { - return isExtendedVector() && getSizeInBits() == 128; + return isExtendedVector() && getExtendedSizeInBits() == 128; } bool EVT::isExtended256BitVector() const { - return isExtendedVector() && getSizeInBits() == 256; + return isExtendedVector() && getExtendedSizeInBits() == 256; } bool EVT::isExtended512BitVector() const { - return isExtendedVector() && getSizeInBits() == 512; + return isExtendedVector() && getExtendedSizeInBits() == 512; } bool EVT::isExtended1024BitVector() const { - return isExtendedVector() && getSizeInBits() == 1024; + return isExtendedVector() && getExtendedSizeInBits() == 1024; } EVT EVT::getExtendedVectorElementType() const { @@ -120,15 +128,21 @@ std::string EVT::getEVTString() const { case MVT::Other: return "ch"; case MVT::Glue: return "glue"; case MVT::x86mmx: return "x86mmx"; + case MVT::v2i1: return "v2i1"; + case MVT::v4i1: return "v4i1"; + case MVT::v8i1: return "v8i1"; + case MVT::v16i1: return "v16i1"; case MVT::v2i8: return "v2i8"; case MVT::v4i8: return "v4i8"; case MVT::v8i8: return "v8i8"; case MVT::v16i8: return "v16i8"; case MVT::v32i8: return "v32i8"; + case MVT::v1i16: return "v1i16"; case MVT::v2i16: return "v2i16"; case MVT::v4i16: return "v4i16"; case MVT::v8i16: return "v8i16"; case MVT::v16i16: return 
"v16i16"; + case MVT::v1i32: return "v1i32"; case MVT::v2i32: return "v2i32"; case MVT::v4i32: return "v4i32"; case MVT::v8i32: return "v8i32"; @@ -171,15 +185,21 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const { case MVT::f128: return Type::getFP128Ty(Context); case MVT::ppcf128: return Type::getPPC_FP128Ty(Context); case MVT::x86mmx: return Type::getX86_MMXTy(Context); + case MVT::v2i1: return VectorType::get(Type::getInt1Ty(Context), 2); + case MVT::v4i1: return VectorType::get(Type::getInt1Ty(Context), 4); + case MVT::v8i1: return VectorType::get(Type::getInt1Ty(Context), 8); + case MVT::v16i1: return VectorType::get(Type::getInt1Ty(Context), 16); case MVT::v2i8: return VectorType::get(Type::getInt8Ty(Context), 2); case MVT::v4i8: return VectorType::get(Type::getInt8Ty(Context), 4); case MVT::v8i8: return VectorType::get(Type::getInt8Ty(Context), 8); case MVT::v16i8: return VectorType::get(Type::getInt8Ty(Context), 16); case MVT::v32i8: return VectorType::get(Type::getInt8Ty(Context), 32); + case MVT::v1i16: return VectorType::get(Type::getInt16Ty(Context), 1); case MVT::v2i16: return VectorType::get(Type::getInt16Ty(Context), 2); case MVT::v4i16: return VectorType::get(Type::getInt16Ty(Context), 4); case MVT::v8i16: return VectorType::get(Type::getInt16Ty(Context), 8); case MVT::v16i16: return VectorType::get(Type::getInt16Ty(Context), 16); + case MVT::v1i32: return VectorType::get(Type::getInt32Ty(Context), 1); case MVT::v2i32: return VectorType::get(Type::getInt32Ty(Context), 2); case MVT::v4i32: return VectorType::get(Type::getInt32Ty(Context), 4); case MVT::v8i32: return VectorType::get(Type::getInt32Ty(Context), 8); diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp index 38914b3..eb40b09 100644 --- a/lib/VMCore/Verifier.cpp +++ b/lib/VMCore/Verifier.cpp @@ -400,8 +400,8 @@ void Verifier::visitGlobalValue(GlobalValue &GV) { "Only global arrays can have appending linkage!", GVar); } - Assert1(!GV.hasLinkerPrivateWeakDefAutoLinkage() || GV.hasDefaultVisibility(), - "linker_private_weak_def_auto can only have default visibility!", + Assert1(!GV.hasLinkOnceODRAutoHideLinkage() || GV.hasDefaultVisibility(), + "linkonce_odr_auto_hide can only have default visibility!", &GV); } @@ -526,40 +526,60 @@ void Verifier::visitMDNode(MDNode &MD, Function *F) { // value of the specified type. The value V is printed in error messages. 
void Verifier::VerifyParameterAttrs(Attributes Attrs, Type *Ty, bool isReturnValue, const Value *V) { - if (Attrs == Attribute::None) + if (!Attrs.hasAttributes()) return; - Attributes FnCheckAttr = Attrs & Attribute::FunctionOnly; - Assert1(!FnCheckAttr, "Attribute " + Attribute::getAsString(FnCheckAttr) + - " only applies to the function!", V); - - if (isReturnValue) { - Attributes RetI = Attrs & Attribute::ParameterOnly; - Assert1(!RetI, "Attribute " + Attribute::getAsString(RetI) + - " does not apply to return values!", V); - } - - for (unsigned i = 0; - i < array_lengthof(Attribute::MutuallyIncompatible); ++i) { - Attributes MutI = Attrs & Attribute::MutuallyIncompatible[i]; - Assert1(MutI.isEmptyOrSingleton(), "Attributes " + - Attribute::getAsString(MutI) + " are incompatible!", V); - } - - Attributes TypeI = Attrs & Attribute::typeIncompatible(Ty); - Assert1(!TypeI, "Wrong type for attribute " + - Attribute::getAsString(TypeI), V); - - Attributes ByValI = Attrs & Attribute::ByVal; - if (PointerType *PTy = dyn_cast<PointerType>(Ty)) { - Assert1(!ByValI || PTy->getElementType()->isSized(), - "Attribute " + Attribute::getAsString(ByValI) + - " does not support unsized types!", V); - } else { - Assert1(!ByValI, - "Attribute " + Attribute::getAsString(ByValI) + - " only applies to parameters with pointer type!", V); - } + Assert1(!Attrs.hasFunctionOnlyAttrs(), + "Some attributes in '" + Attrs.getAsString() + + "' only apply to functions!", V); + + if (isReturnValue) + Assert1(!Attrs.hasParameterOnlyAttrs(), + "Attributes 'byval', 'nest', 'sret', and 'nocapture' " + "do not apply to return values!", V); + + // Check for mutually incompatible attributes. + Assert1(!((Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::Nest)) || + (Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::StructRet)) || + (Attrs.hasAttribute(Attributes::Nest) && + Attrs.hasAttribute(Attributes::StructRet))), "Attributes " + "'byval, nest, and sret' are incompatible!", V); + + Assert1(!((Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::Nest)) || + (Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::InReg)) || + (Attrs.hasAttribute(Attributes::Nest) && + Attrs.hasAttribute(Attributes::InReg))), "Attributes " + "'byval, nest, and inreg' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Attributes::ZExt) && + Attrs.hasAttribute(Attributes::SExt)), "Attributes " + "'zeroext and signext' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Attributes::ReadNone) && + Attrs.hasAttribute(Attributes::ReadOnly)), "Attributes " + "'readnone and readonly' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Attributes::NoInline) && + Attrs.hasAttribute(Attributes::AlwaysInline)), "Attributes " + "'noinline and alwaysinline' are incompatible!", V); + + Assert1(!AttrBuilder(Attrs). + hasAttributes(Attributes::typeIncompatible(Ty)), + "Wrong types for attribute: " + + Attributes::typeIncompatible(Ty).getAsString(), V); + + if (PointerType *PTy = dyn_cast<PointerType>(Ty)) + Assert1(!Attrs.hasAttribute(Attributes::ByVal) || + PTy->getElementType()->isSized(), + "Attribute 'byval' does not support unsized types!", V); + else + Assert1(!Attrs.hasAttribute(Attributes::ByVal), + "Attribute 'byval' only applies to parameters with pointer type!", + V); } // VerifyFunctionAttrs - Check parameter attributes against a function type. 
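The rewritten VerifyParameterAttrs above queries each property by name through Attributes::hasAttribute instead of masking bit sets, and keeps the old rule that byval is only legal on pointers to sized types. As a small illustration of that query style, here is a hypothetical helper (a sketch only, reusing the Attributes, PointerType and Type calls that appear in this patch; it is not part of the Verifier and would need the in-tree headers to compile):

#include "llvm/Attributes.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Hypothetical helper: true if the byval rule checked above holds, i.e. the
// attribute is absent or the parameter is a pointer to a sized type.
static bool byValRuleHolds(Attributes Attrs, Type *Ty) {
  if (!Attrs.hasAttribute(Attributes::ByVal))
    return true;                                   // nothing to enforce
  PointerType *PTy = dyn_cast<PointerType>(Ty);
  return PTy && PTy->getElementType()->isSized();  // byval needs a sized pointee
}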
@@ -585,26 +605,50 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT, VerifyParameterAttrs(Attr.Attrs, Ty, Attr.Index == 0, V); - if (Attr.Attrs & Attribute::Nest) { + if (Attr.Attrs.hasAttribute(Attributes::Nest)) { Assert1(!SawNest, "More than one parameter has attribute nest!", V); SawNest = true; } - if (Attr.Attrs & Attribute::StructRet) + if (Attr.Attrs.hasAttribute(Attributes::StructRet)) Assert1(Attr.Index == 1, "Attribute sret not on first parameter!", V); } Attributes FAttrs = Attrs.getFnAttributes(); - Attributes NotFn = FAttrs & (~Attribute::FunctionOnly); - Assert1(!NotFn, "Attribute " + Attribute::getAsString(NotFn) + - " does not apply to the function!", V); - - for (unsigned i = 0; - i < array_lengthof(Attribute::MutuallyIncompatible); ++i) { - Attributes MutI = FAttrs & Attribute::MutuallyIncompatible[i]; - Assert1(MutI.isEmptyOrSingleton(), "Attributes " + - Attribute::getAsString(MutI) + " are incompatible!", V); - } + AttrBuilder NotFn(FAttrs); + NotFn.removeFunctionOnlyAttrs(); + Assert1(!NotFn.hasAttributes(), "Attributes '" + + Attributes::get(V->getContext(), NotFn).getAsString() + + "' do not apply to the function!", V); + + // Check for mutually incompatible attributes. + Assert1(!((FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::Nest)) || + (FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::StructRet)) || + (FAttrs.hasAttribute(Attributes::Nest) && + FAttrs.hasAttribute(Attributes::StructRet))), "Attributes " + "'byval, nest, and sret' are incompatible!", V); + + Assert1(!((FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::Nest)) || + (FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::InReg)) || + (FAttrs.hasAttribute(Attributes::Nest) && + FAttrs.hasAttribute(Attributes::InReg))), "Attributes " + "'byval, nest, and inreg' are incompatible!", V); + + Assert1(!(FAttrs.hasAttribute(Attributes::ZExt) && + FAttrs.hasAttribute(Attributes::SExt)), "Attributes " + "'zeroext and signext' are incompatible!", V); + + Assert1(!(FAttrs.hasAttribute(Attributes::ReadNone) && + FAttrs.hasAttribute(Attributes::ReadOnly)), "Attributes " + "'readnone and readonly' are incompatible!", V); + + Assert1(!(FAttrs.hasAttribute(Attributes::NoInline) && + FAttrs.hasAttribute(Attributes::AlwaysInline)), "Attributes " + "'noinline and alwaysinline' are incompatible!", V); } static bool VerifyAttributeCount(const AttrListPtr &Attrs, unsigned Params) { @@ -661,6 +705,7 @@ void Verifier::visitFunction(Function &F) { case CallingConv::Cold: case CallingConv::X86_FastCall: case CallingConv::X86_ThisCall: + case CallingConv::Intel_OCL_BI: case CallingConv::PTX_Kernel: case CallingConv::PTX_Device: Assert1(!F.isVarArg(), @@ -1170,9 +1215,8 @@ void Verifier::VerifyCallSite(CallSite CS) { VerifyParameterAttrs(Attr, CS.getArgument(Idx-1)->getType(), false, I); - Attributes VArgI = Attr & Attribute::VarArgsIncompatible; - Assert1(!VArgI, "Attribute " + Attribute::getAsString(VArgI) + - " cannot be used for vararg call arguments!", I); + Assert1(!Attr.hasIncompatibleWithVarArgsAttrs(), + "Attribute 'sret' cannot be used for vararg call arguments!", I); } // Verify that there's no metadata unless it's a direct call to an intrinsic. 
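VerifyFunctionAttrs above spells out the mutually incompatible pairs (byval/nest/sret, byval/nest/inreg, zeroext/signext, readnone/readonly, noinline/alwaysinline) as explicit hasAttribute checks. The standalone sketch below (plain C++, not the Verifier's code) expresses the same idea as a table-driven pairwise check, which is one way to keep such a list in a single place:

#include <cstdio>
#include <set>
#include <string>

typedef std::set<std::string> AttrSet;

// Return true and report the offending pair if two attributes that the
// verifier treats as incompatible are both present.
static bool hasIncompatiblePair(const AttrSet &Attrs, std::string &Why) {
  static const char *const Pairs[][2] = {
    { "byval",    "nest"         }, { "byval", "sret"  }, { "nest", "sret"  },
    { "byval",    "inreg"        }, { "nest",  "inreg" },
    { "zeroext",  "signext"      },
    { "readnone", "readonly"     },
    { "noinline", "alwaysinline" },
  };
  const unsigned N = sizeof(Pairs) / sizeof(Pairs[0]);
  for (unsigned i = 0; i != N; ++i)
    if (Attrs.count(Pairs[i][0]) && Attrs.count(Pairs[i][1])) {
      Why = std::string(Pairs[i][0]) + " and " + Pairs[i][1];
      return true;
    }
  return false;
}

int main() {
  AttrSet A;
  A.insert("readnone");
  A.insert("readonly");
  std::string Why;
  if (hasIncompatiblePair(A, Why))
    std::printf("incompatible: %s\n", Why.c_str());  // prints: incompatible: readnone and readonly
  return 0;
}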
@@ -1378,6 +1422,15 @@ void Verifier::visitLoadInst(LoadInst &LI) { "Load cannot have Release ordering", &LI); Assert1(LI.getAlignment() != 0, "Atomic load must specify explicit alignment", &LI); + if (!ElTy->isPointerTy()) { + Assert2(ElTy->isIntegerTy(), + "atomic store operand must have integer type!", + &LI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomic store operand must be power-of-two byte-sized integer", + &LI, ElTy); + } } else { Assert1(LI.getSynchScope() == CrossThread, "Non-atomic load cannot have SynchronizationScope specified", &LI); @@ -1444,6 +1497,15 @@ void Verifier::visitStoreInst(StoreInst &SI) { "Store cannot have Acquire ordering", &SI); Assert1(SI.getAlignment() != 0, "Atomic store must specify explicit alignment", &SI); + if (!ElTy->isPointerTy()) { + Assert2(ElTy->isIntegerTy(), + "atomic store operand must have integer type!", + &SI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomic store operand must be power-of-two byte-sized integer", + &SI, ElTy); + } } else { Assert1(SI.getSynchScope() == CrossThread, "Non-atomic store cannot have SynchronizationScope specified", &SI); @@ -1471,6 +1533,13 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType()); Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI); Type *ElTy = PTy->getElementType(); + Assert2(ElTy->isIntegerTy(), + "cmpxchg operand must have integer type!", + &CXI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "cmpxchg operand must be power-of-two byte-sized integer", + &CXI, ElTy); Assert2(ElTy == CXI.getOperand(1)->getType(), "Expected value type does not match pointer operand type!", &CXI, ElTy); @@ -1488,6 +1557,13 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI); Type *ElTy = PTy->getElementType(); + Assert2(ElTy->isIntegerTy(), + "atomicrmw operand must have integer type!", + &RMWI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomicrmw operand must be power-of-two byte-sized integer", + &RMWI, ElTy); Assert2(ElTy == RMWI.getOperand(1)->getType(), "Argument value type does not match pointer operand type!", &RMWI, ElTy); @@ -1575,6 +1651,13 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) { void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { Instruction *Op = cast<Instruction>(I.getOperand(i)); + // If the we have an invalid invoke, don't try to compute the dominance. + // We already reject it in the invoke specific checks and the dominance + // computation doesn't handle multiple edges. + if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) { + if (II->getNormalDest() == II->getUnwindDest()) + return; + } const Use &U = I.getOperandUse(i); Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, U), |
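The atomic load, store, cmpxchg and atomicrmw hunks above all enforce the same operand rule: an integer type whose width is at least 8 bits and a power of two. A standalone restatement of that predicate (not LLVM code, just the same bit trick):

#include <cassert>

// True iff SizeInBits is a power-of-two number of whole bytes, the condition
// the verifier asserts for atomic memory operands above.
static bool isValidAtomicWidth(unsigned SizeInBits) {
  return SizeInBits >= 8 && (SizeInBits & (SizeInBits - 1)) == 0;
}

int main() {
  assert(isValidAtomicWidth(8) && isValidAtomicWidth(32) && isValidAtomicWidth(128));
  assert(!isValidAtomicWidth(1) && !isValidAtomicWidth(24) && !isValidAtomicWidth(48));
  return 0;
}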