Diffstat (limited to 'include')
336 files changed, 80692 insertions, 0 deletions
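Taken together, the headers added below make up the llvm-c bindings. A minimal sketch of how they compose follows; the module name "demo", the function name "add42", and the output path "demo.bc" are illustrative only:

    #include "llvm-c/Analysis.h"
    #include "llvm-c/BitWriter.h"
    #include "llvm-c/Core.h"

    int main(void) {
      /* Build a module containing one function: i32 add42(i32 %x) */
      LLVMModuleRef Mod = LLVMModuleCreateWithName("demo");
      LLVMTypeRef Params[] = { LLVMInt32Type() };
      LLVMValueRef Fn = LLVMAddFunction(Mod, "add42",
          LLVMFunctionType(LLVMInt32Type(), Params, 1, 0));

      LLVMBuilderRef B = LLVMCreateBuilder();
      LLVMPositionBuilderAtEnd(B, LLVMAppendBasicBlock(Fn, "entry"));
      LLVMBuildRet(B, LLVMBuildAdd(B, LLVMGetParam(Fn, 0),
                                   LLVMConstInt(LLVMInt32Type(), 42, 0), "sum"));
      LLVMDisposeBuilder(B);

      /* Verify the IR, then emit bitcode; both calls return 0 on success. */
      char *Error = 0;
      if (LLVMVerifyModule(Mod, LLVMPrintMessageAction, &Error))
        LLVMDisposeMessage(Error);
      LLVMWriteBitcodeToFile(Mod, "demo.bc");

      LLVMDisposeModule(Mod);
      return 0;
    }
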
diff --git a/include/llvm-c/Analysis.h b/include/llvm-c/Analysis.h new file mode 100644 index 0000000..68d8e65 --- /dev/null +++ b/include/llvm-c/Analysis.h @@ -0,0 +1,55 @@ +/*===-- llvm-c/Analysis.h - Analysis Library C Interface --------*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMAnalysis.a, which *| +|* implements various analyses of the LLVM IR. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_ANALYSIS_H +#define LLVM_C_ANALYSIS_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef enum { + LLVMAbortProcessAction, /* verifier will print to stderr and abort() */ + LLVMPrintMessageAction, /* verifier will print to stderr and return 1 */ + LLVMReturnStatusAction /* verifier will just return 1 */ +} LLVMVerifierFailureAction; + + +/* Verifies that a module is valid, taking the specified action if not. + Optionally returns a human-readable description of any invalid constructs. + OutMessage must be disposed with LLVMDisposeMessage. */ +int LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action, + char **OutMessage); + +/* Verifies that a single function is valid, taking the specified action. Useful + for debugging. */ +int LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action); + +/* Open up a ghostview window that displays the CFG of the current function. + Useful for debugging. */ +void LLVMViewFunctionCFG(LLVMValueRef Fn); +void LLVMViewFunctionCFGOnly(LLVMValueRef Fn); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/llvm-c/BitReader.h b/include/llvm-c/BitReader.h new file mode 100644 index 0000000..e30b431 --- /dev/null +++ b/include/llvm-c/BitReader.h @@ -0,0 +1,47 @@ +/*===-- llvm-c/BitReader.h - BitReader Library C Interface ------*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMBitReader.a, which *| +|* implements input of the LLVM bitcode format. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_BITCODEREADER_H +#define LLVM_C_BITCODEREADER_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Builds a module from the bitcode in the specified memory buffer, returning a + reference to the module via the OutModule parameter. Returns 0 on success. + Optionally returns a human-readable error message via OutMessage. 
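+
+   A minimal usage sketch; the path "input.bc" is illustrative, and the
+   memory-buffer helper from Core.h is assumed to follow the same
+   0-on-success convention:
+
+     LLVMMemoryBufferRef Buf;
+     LLVMModuleRef Mod;
+     char *Msg = 0;
+     if (!LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Msg) &&
+         !LLVMParseBitcode(Buf, &Mod, &Msg))
+       LLVMDumpModule(Mod);
+     else if (Msg)
+       LLVMDisposeMessage(Msg);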
*/ +int LLVMParseBitcode(LLVMMemoryBufferRef MemBuf, + LLVMModuleRef *OutModule, char **OutMessage); + +/* Reads a module from the specified path, returning via the OutMP parameter + a module provider which performs lazy deserialization. Returns 0 on success. + Optionally returns a human-readable error message via OutMessage. */ +int LLVMGetBitcodeModuleProvider(LLVMMemoryBufferRef MemBuf, + LLVMModuleProviderRef *OutMP, + char **OutMessage); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/llvm-c/BitWriter.h b/include/llvm-c/BitWriter.h new file mode 100644 index 0000000..008ff9f --- /dev/null +++ b/include/llvm-c/BitWriter.h @@ -0,0 +1,43 @@ +/*===-- llvm-c/BitWriter.h - BitWriter Library C Interface ------*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMBitWriter.a, which *| +|* implements output of the LLVM bitcode format. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_BITCODEWRITER_H +#define LLVM_C_BITCODEWRITER_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/*===-- Operations on modules ---------------------------------------------===*/ + +/* Writes a module to an open file descriptor. Returns 0 on success. + Closes the Handle. Use dup first if this is not what you want. */ +int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int Handle); + +/* Writes a module to the specified path. Returns 0 on success. */ +int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h new file mode 100644 index 0000000..d2d8845 --- /dev/null +++ b/include/llvm-c/Core.h @@ -0,0 +1,856 @@ +/*===-- llvm-c/Core.h - Core Library C Interface ------------------*- C -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMCore.a, which implements *| +|* the LLVM intermediate representation. *| +|* *| +|* LLVM uses a polymorphic type hierarchy which C cannot represent, therefore *| +|* parameters must be passed as base types. Despite the declared types, most *| +|* of the functions provided operate only on branches of the type hierarchy. *| +|* The declared parameter names are descriptive and specify which type is *| +|* required. Additionally, each type hierarchy is documented along with the *| +|* functions that operate upon it. For more detail, refer to LLVM's C++ code. *| +|* If in doubt, refer to Core.cpp, which performs paramter downcasts in the *| +|* form unwrap<RequiredType>(Param). *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. 
*| +|* *| +|* When included into a C++ source file, also declares 'wrap' and 'unwrap' *| +|* helpers to perform opaque reference<-->pointer conversions. These helpers *| +|* are shorter and more tightly typed than writing the casts by hand when *| +|* authoring bindings. In assert builds, they will do runtime type checking. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_CORE_H +#define LLVM_C_CORE_H + +#ifdef __cplusplus + +/* Need these includes to support the LLVM 'cast' template for the C++ 'wrap' + and 'unwrap' conversion functions. */ +#include "llvm/Module.h" +#include "llvm/Support/IRBuilder.h" + +extern "C" { +#endif + + +/* Opaque types. */ + +/** + * The top-level container for all other LLVM Intermediate Representation (IR) + * objects. See the llvm::Module class. + */ +typedef struct LLVMOpaqueModule *LLVMModuleRef; + +/** + * Each value in the LLVM IR has a type, an LLVMTypeRef. See the llvm::Type + * class. + */ +typedef struct LLVMOpaqueType *LLVMTypeRef; + +/** + * When building recursive types using LLVMRefineType, LLVMTypeRef values may + * become invalid; use LLVMTypeHandleRef to resolve this problem. See the + * llvm::AbstractTypeHolder class. + */ +typedef struct LLVMOpaqueTypeHandle *LLVMTypeHandleRef; + +typedef struct LLVMOpaqueValue *LLVMValueRef; +typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef; +typedef struct LLVMOpaqueBuilder *LLVMBuilderRef; + +/* Used to provide a module to JIT or interpreter. + * See the llvm::ModuleProvider class. + */ +typedef struct LLVMOpaqueModuleProvider *LLVMModuleProviderRef; + +/* Used to provide a module to JIT or interpreter. + * See the llvm::MemoryBuffer class. + */ +typedef struct LLVMOpaqueMemoryBuffer *LLVMMemoryBufferRef; + +/** See the llvm::PassManagerBase class. */ +typedef struct LLVMOpaquePassManager *LLVMPassManagerRef; + +typedef enum { + LLVMZExtAttribute = 1<<0, + LLVMSExtAttribute = 1<<1, + LLVMNoReturnAttribute = 1<<2, + LLVMInRegAttribute = 1<<3, + LLVMStructRetAttribute = 1<<4, + LLVMNoUnwindAttribute = 1<<5, + LLVMNoAliasAttribute = 1<<6, + LLVMByValAttribute = 1<<7, + LLVMNestAttribute = 1<<8, + LLVMReadNoneAttribute = 1<<9, + LLVMReadOnlyAttribute = 1<<10 +} LLVMAttribute; + +typedef enum { + LLVMVoidTypeKind, /**< type with no size */ + LLVMFloatTypeKind, /**< 32 bit floating point type */ + LLVMDoubleTypeKind, /**< 64 bit floating point type */ + LLVMX86_FP80TypeKind, /**< 80 bit floating point type (X87) */ + LLVMFP128TypeKind, /**< 128 bit floating point type (112-bit mantissa)*/ + LLVMPPC_FP128TypeKind, /**< 128 bit floating point type (two 64-bits) */ + LLVMLabelTypeKind, /**< Labels */ + LLVMIntegerTypeKind, /**< Arbitrary bit width integers */ + LLVMFunctionTypeKind, /**< Functions */ + LLVMStructTypeKind, /**< Structures */ + LLVMArrayTypeKind, /**< Arrays */ + LLVMPointerTypeKind, /**< Pointers */ + LLVMOpaqueTypeKind, /**< Opaque: type with unknown structure */ + LLVMVectorTypeKind /**< SIMD 'packed' format, or other vector type */ +} LLVMTypeKind; + +typedef enum { + LLVMExternalLinkage, /**< Externally visible function */ + LLVMAvailableExternallyLinkage, + LLVMLinkOnceAnyLinkage, /**< Keep one copy of function when linking (inline)*/ + LLVMLinkOnceODRLinkage, /**< Same, but only replaced by something + equivalent. */ + LLVMWeakAnyLinkage, /**< Keep one copy of function when linking (weak) */ + LLVMWeakODRLinkage, /**< Same, but only replaced by something + equivalent. 
*/ + LLVMAppendingLinkage, /**< Special purpose, only applies to global arrays */ + LLVMInternalLinkage, /**< Rename collisions when linking (static + functions) */ + LLVMPrivateLinkage, /**< Like Internal, but omit from symbol table */ + LLVMDLLImportLinkage, /**< Function to be imported from DLL */ + LLVMDLLExportLinkage, /**< Function to be accessible from DLL */ + LLVMExternalWeakLinkage,/**< ExternalWeak linkage description */ + LLVMGhostLinkage, /**< Stand-in functions for streaming fns from + bitcode */ + LLVMCommonLinkage /**< Tentative definitions */ +} LLVMLinkage; + +typedef enum { + LLVMDefaultVisibility, /**< The GV is visible */ + LLVMHiddenVisibility, /**< The GV is hidden */ + LLVMProtectedVisibility /**< The GV is protected */ +} LLVMVisibility; + +typedef enum { + LLVMCCallConv = 0, + LLVMFastCallConv = 8, + LLVMColdCallConv = 9, + LLVMX86StdcallCallConv = 64, + LLVMX86FastcallCallConv = 65 +} LLVMCallConv; + +typedef enum { + LLVMIntEQ = 32, /**< equal */ + LLVMIntNE, /**< not equal */ + LLVMIntUGT, /**< unsigned greater than */ + LLVMIntUGE, /**< unsigned greater or equal */ + LLVMIntULT, /**< unsigned less than */ + LLVMIntULE, /**< unsigned less or equal */ + LLVMIntSGT, /**< signed greater than */ + LLVMIntSGE, /**< signed greater or equal */ + LLVMIntSLT, /**< signed less than */ + LLVMIntSLE /**< signed less or equal */ +} LLVMIntPredicate; + +typedef enum { + LLVMRealPredicateFalse, /**< Always false (always folded) */ + LLVMRealOEQ, /**< True if ordered and equal */ + LLVMRealOGT, /**< True if ordered and greater than */ + LLVMRealOGE, /**< True if ordered and greater than or equal */ + LLVMRealOLT, /**< True if ordered and less than */ + LLVMRealOLE, /**< True if ordered and less than or equal */ + LLVMRealONE, /**< True if ordered and operands are unequal */ + LLVMRealORD, /**< True if ordered (no nans) */ + LLVMRealUNO, /**< True if unordered: isnan(X) | isnan(Y) */ + LLVMRealUEQ, /**< True if unordered or equal */ + LLVMRealUGT, /**< True if unordered or greater than */ + LLVMRealUGE, /**< True if unordered, greater than, or equal */ + LLVMRealULT, /**< True if unordered or less than */ + LLVMRealULE, /**< True if unordered, less than, or equal */ + LLVMRealUNE, /**< True if unordered or not equal */ + LLVMRealPredicateTrue /**< Always true (always folded) */ +} LLVMRealPredicate; + + +/*===-- Error handling ----------------------------------------------------===*/ + +void LLVMDisposeMessage(char *Message); + + +/*===-- Modules -----------------------------------------------------------===*/ + +/* Create and destroy modules. */ +/** See llvm::Module::Module. */ +LLVMModuleRef LLVMModuleCreateWithName(const char *ModuleID); + +/** See llvm::Module::~Module. */ +void LLVMDisposeModule(LLVMModuleRef M); + +/** Data layout. See Module::getDataLayout. */ +const char *LLVMGetDataLayout(LLVMModuleRef M); +void LLVMSetDataLayout(LLVMModuleRef M, const char *Triple); + +/** Target triple. See Module::getTargetTriple. */ +const char *LLVMGetTarget(LLVMModuleRef M); +void LLVMSetTarget(LLVMModuleRef M, const char *Triple); + +/** See Module::addTypeName. */ +int LLVMAddTypeName(LLVMModuleRef M, const char *Name, LLVMTypeRef Ty); +void LLVMDeleteTypeName(LLVMModuleRef M, const char *Name); + +/** See Module::dump. 
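+    Dumps the module's textual IR to stderr, like llvm::Module::dump. For
+    example, this sketch prints an empty module named "demo":
+
+      LLVMModuleRef M = LLVMModuleCreateWithName("demo");
+      LLVMDumpModule(M);
+      LLVMDisposeModule(M);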
*/ +void LLVMDumpModule(LLVMModuleRef M); + + +/*===-- Types -------------------------------------------------------------===*/ + +/* LLVM types conform to the following hierarchy: + * + * types: + * integer type + * real type + * function type + * sequence types: + * array type + * pointer type + * vector type + * void type + * label type + * opaque type + */ + +/** See llvm::LLVMTypeKind::getTypeID. */ +LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty); + +/* Operations on integer types */ +LLVMTypeRef LLVMInt1Type(void); +LLVMTypeRef LLVMInt8Type(void); +LLVMTypeRef LLVMInt16Type(void); +LLVMTypeRef LLVMInt32Type(void); +LLVMTypeRef LLVMInt64Type(void); +LLVMTypeRef LLVMIntType(unsigned NumBits); +unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy); + +/* Operations on real types */ +LLVMTypeRef LLVMFloatType(void); +LLVMTypeRef LLVMDoubleType(void); +LLVMTypeRef LLVMX86FP80Type(void); +LLVMTypeRef LLVMFP128Type(void); +LLVMTypeRef LLVMPPCFP128Type(void); + +/* Operations on function types */ +LLVMTypeRef LLVMFunctionType(LLVMTypeRef ReturnType, + LLVMTypeRef *ParamTypes, unsigned ParamCount, + int IsVarArg); +int LLVMIsFunctionVarArg(LLVMTypeRef FunctionTy); +LLVMTypeRef LLVMGetReturnType(LLVMTypeRef FunctionTy); +unsigned LLVMCountParamTypes(LLVMTypeRef FunctionTy); +void LLVMGetParamTypes(LLVMTypeRef FunctionTy, LLVMTypeRef *Dest); + +/* Operations on struct types */ +LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes, unsigned ElementCount, + int Packed); +unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy); +void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest); +int LLVMIsPackedStruct(LLVMTypeRef StructTy); + +/* Operations on array, pointer, and vector types (sequence types) */ +LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount); +LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace); +LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount); + +LLVMTypeRef LLVMGetElementType(LLVMTypeRef Ty); +unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy); +unsigned LLVMGetPointerAddressSpace(LLVMTypeRef PointerTy); +unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy); + +/* Operations on other types */ +LLVMTypeRef LLVMVoidType(void); +LLVMTypeRef LLVMLabelType(void); +LLVMTypeRef LLVMOpaqueType(void); + +/* Operations on type handles */ +LLVMTypeHandleRef LLVMCreateTypeHandle(LLVMTypeRef PotentiallyAbstractTy); +void LLVMRefineType(LLVMTypeRef AbstractTy, LLVMTypeRef ConcreteTy); +LLVMTypeRef LLVMResolveTypeHandle(LLVMTypeHandleRef TypeHandle); +void LLVMDisposeTypeHandle(LLVMTypeHandleRef TypeHandle); + + +/*===-- Values ------------------------------------------------------------===*/ + +/* The bulk of LLVM's object model consists of values, which comprise a very + * rich type hierarchy. 
+ */ + +#define LLVM_FOR_EACH_VALUE_SUBCLASS(macro) \ + macro(Argument) \ + macro(BasicBlock) \ + macro(InlineAsm) \ + macro(User) \ + macro(Constant) \ + macro(ConstantAggregateZero) \ + macro(ConstantArray) \ + macro(ConstantExpr) \ + macro(ConstantFP) \ + macro(ConstantInt) \ + macro(ConstantPointerNull) \ + macro(ConstantStruct) \ + macro(ConstantVector) \ + macro(GlobalValue) \ + macro(Function) \ + macro(GlobalAlias) \ + macro(GlobalVariable) \ + macro(UndefValue) \ + macro(Instruction) \ + macro(BinaryOperator) \ + macro(CallInst) \ + macro(IntrinsicInst) \ + macro(DbgInfoIntrinsic) \ + macro(DbgDeclareInst) \ + macro(DbgFuncStartInst) \ + macro(DbgRegionEndInst) \ + macro(DbgRegionStartInst) \ + macro(DbgStopPointInst) \ + macro(EHSelectorInst) \ + macro(MemIntrinsic) \ + macro(MemCpyInst) \ + macro(MemMoveInst) \ + macro(MemSetInst) \ + macro(CmpInst) \ + macro(FCmpInst) \ + macro(ICmpInst) \ + macro(VFCmpInst) \ + macro(VICmpInst) \ + macro(ExtractElementInst) \ + macro(GetElementPtrInst) \ + macro(InsertElementInst) \ + macro(InsertValueInst) \ + macro(PHINode) \ + macro(SelectInst) \ + macro(ShuffleVectorInst) \ + macro(StoreInst) \ + macro(TerminatorInst) \ + macro(BranchInst) \ + macro(InvokeInst) \ + macro(ReturnInst) \ + macro(SwitchInst) \ + macro(UnreachableInst) \ + macro(UnwindInst) \ + macro(UnaryInstruction) \ + macro(AllocationInst) \ + macro(AllocaInst) \ + macro(MallocInst) \ + macro(CastInst) \ + macro(BitCastInst) \ + macro(FPExtInst) \ + macro(FPToSIInst) \ + macro(FPToUIInst) \ + macro(FPTruncInst) \ + macro(IntToPtrInst) \ + macro(PtrToIntInst) \ + macro(SExtInst) \ + macro(SIToFPInst) \ + macro(TruncInst) \ + macro(UIToFPInst) \ + macro(ZExtInst) \ + macro(ExtractValueInst) \ + macro(FreeInst) \ + macro(LoadInst) \ + macro(VAArgInst) + +/* Operations on all values */ +LLVMTypeRef LLVMTypeOf(LLVMValueRef Val); +const char *LLVMGetValueName(LLVMValueRef Val); +void LLVMSetValueName(LLVMValueRef Val, const char *Name); +void LLVMDumpValue(LLVMValueRef Val); + +/* Conversion functions. Return the input value if it is an instance of the + specified class, otherwise NULL. See llvm::dyn_cast_or_null<>. 
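+
+   For example (a sketch; HandleIntConstant is a hypothetical callback):
+
+     if (LLVMIsAConstantInt(Val))
+       HandleIntConstant(Val);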
*/ +#define LLVM_DECLARE_VALUE_CAST(name) \ + LLVMValueRef LLVMIsA##name(LLVMValueRef Val); +LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST) + +/* Operations on constants of any type */ +LLVMValueRef LLVMConstNull(LLVMTypeRef Ty); /* all zeroes */ +LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty); /* only for int/vector */ +LLVMValueRef LLVMGetUndef(LLVMTypeRef Ty); +int LLVMIsConstant(LLVMValueRef Val); +int LLVMIsNull(LLVMValueRef Val); +int LLVMIsUndef(LLVMValueRef Val); + +/* Operations on scalar constants */ +LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N, + int SignExtend); +LLVMValueRef LLVMConstReal(LLVMTypeRef RealTy, double N); +LLVMValueRef LLVMConstRealOfString(LLVMTypeRef RealTy, const char *Text); + +/* Operations on composite constants */ +LLVMValueRef LLVMConstString(const char *Str, unsigned Length, + int DontNullTerminate); +LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy, + LLVMValueRef *ConstantVals, unsigned Length); +LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count, + int packed); +LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size); + +/* Constant expressions */ +LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty); +LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal); +LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal); +LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstFDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstURem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstSRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstFRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstAnd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstOr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstXor(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstICmp(LLVMIntPredicate Predicate, + LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate, + LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant); +LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal, + LLVMValueRef *ConstantIndices, unsigned NumIndices); +LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstFPTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstFPExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstUIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstSIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstFPToUI(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstFPToSI(LLVMValueRef ConstantVal, LLVMTypeRef ToType); 
+LLVMValueRef LLVMConstPtrToInt(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstIntToPtr(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstBitCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType); +LLVMValueRef LLVMConstSelect(LLVMValueRef ConstantCondition, + LLVMValueRef ConstantIfTrue, + LLVMValueRef ConstantIfFalse); +LLVMValueRef LLVMConstExtractElement(LLVMValueRef VectorConstant, + LLVMValueRef IndexConstant); +LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant, + LLVMValueRef ElementValueConstant, + LLVMValueRef IndexConstant); +LLVMValueRef LLVMConstShuffleVector(LLVMValueRef VectorAConstant, + LLVMValueRef VectorBConstant, + LLVMValueRef MaskConstant); +LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList, + unsigned NumIdx); +LLVMValueRef LLVMConstInsertValue(LLVMValueRef AggConstant, + LLVMValueRef ElementValueConstant, + unsigned *IdxList, unsigned NumIdx); +LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty, + const char *AsmString, const char *Constraints, + int HasSideEffects); + +/* Operations on global variables, functions, and aliases (globals) */ +LLVMModuleRef LLVMGetGlobalParent(LLVMValueRef Global); +int LLVMIsDeclaration(LLVMValueRef Global); +LLVMLinkage LLVMGetLinkage(LLVMValueRef Global); +void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage); +const char *LLVMGetSection(LLVMValueRef Global); +void LLVMSetSection(LLVMValueRef Global, const char *Section); +LLVMVisibility LLVMGetVisibility(LLVMValueRef Global); +void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz); +unsigned LLVMGetAlignment(LLVMValueRef Global); +void LLVMSetAlignment(LLVMValueRef Global, unsigned Bytes); + +/* Operations on global variables */ +LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name); +LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name); +LLVMValueRef LLVMGetFirstGlobal(LLVMModuleRef M); +LLVMValueRef LLVMGetLastGlobal(LLVMModuleRef M); +LLVMValueRef LLVMGetNextGlobal(LLVMValueRef GlobalVar); +LLVMValueRef LLVMGetPreviousGlobal(LLVMValueRef GlobalVar); +void LLVMDeleteGlobal(LLVMValueRef GlobalVar); +LLVMValueRef LLVMGetInitializer(LLVMValueRef GlobalVar); +void LLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal); +int LLVMIsThreadLocal(LLVMValueRef GlobalVar); +void LLVMSetThreadLocal(LLVMValueRef GlobalVar, int IsThreadLocal); +int LLVMIsGlobalConstant(LLVMValueRef GlobalVar); +void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, int IsConstant); + +/* Operations on aliases */ +LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee, + const char *Name); + +/* Operations on functions */ +LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name, + LLVMTypeRef FunctionTy); +LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name); +LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M); +LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M); +LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn); +LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn); +void LLVMDeleteFunction(LLVMValueRef Fn); +unsigned LLVMGetIntrinsicID(LLVMValueRef Fn); +unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn); +void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC); +const char *LLVMGetGC(LLVMValueRef Fn); +void LLVMSetGC(LLVMValueRef Fn, const char *Name); +void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA); +void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA); + +/* Operations on parameters */ 
+unsigned LLVMCountParams(LLVMValueRef Fn); +void LLVMGetParams(LLVMValueRef Fn, LLVMValueRef *Params); +LLVMValueRef LLVMGetParam(LLVMValueRef Fn, unsigned Index); +LLVMValueRef LLVMGetParamParent(LLVMValueRef Inst); +LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn); +LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn); +LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg); +LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg); +void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA); +void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA); +void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align); + +/* Operations on basic blocks */ +LLVMValueRef LLVMBasicBlockAsValue(LLVMBasicBlockRef BB); +int LLVMValueIsBasicBlock(LLVMValueRef Val); +LLVMBasicBlockRef LLVMValueAsBasicBlock(LLVMValueRef Val); +LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB); +unsigned LLVMCountBasicBlocks(LLVMValueRef Fn); +void LLVMGetBasicBlocks(LLVMValueRef Fn, LLVMBasicBlockRef *BasicBlocks); +LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn); +LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn); +LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB); +LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB); +LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn); +LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef Fn, const char *Name); +LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef InsertBeforeBB, + const char *Name); +void LLVMDeleteBasicBlock(LLVMBasicBlockRef BB); + +/* Operations on instructions */ +LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst); +LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB); +LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB); +LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst); +LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst); + +/* Operations on call sites */ +void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC); +unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr); +void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute); +void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index, + LLVMAttribute); +void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index, + unsigned align); + +/* Operations on call instructions (only) */ +int LLVMIsTailCall(LLVMValueRef CallInst); +void LLVMSetTailCall(LLVMValueRef CallInst, int IsTailCall); + +/* Operations on phi nodes */ +void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues, + LLVMBasicBlockRef *IncomingBlocks, unsigned Count); +unsigned LLVMCountIncoming(LLVMValueRef PhiNode); +LLVMValueRef LLVMGetIncomingValue(LLVMValueRef PhiNode, unsigned Index); +LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index); + +/*===-- Instruction builders ----------------------------------------------===*/ + +/* An instruction builder represents a point within a basic block, and is the + * exclusive means of building instructions using the C interface. 
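+ *
+ * A minimal sketch of the workflow, assuming Fn is a function whose single
+ * i32 parameter should simply be returned:
+ *
+ *   LLVMBuilderRef B = LLVMCreateBuilder();
+ *   LLVMPositionBuilderAtEnd(B, LLVMAppendBasicBlock(Fn, "entry"));
+ *   LLVMBuildRet(B, LLVMGetParam(Fn, 0));
+ *   LLVMDisposeBuilder(B);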
+ */ + +LLVMBuilderRef LLVMCreateBuilder(void); +void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block, + LLVMValueRef Instr); +void LLVMPositionBuilderBefore(LLVMBuilderRef Builder, LLVMValueRef Instr); +void LLVMPositionBuilderAtEnd(LLVMBuilderRef Builder, LLVMBasicBlockRef Block); +LLVMBasicBlockRef LLVMGetInsertBlock(LLVMBuilderRef Builder); +void LLVMClearInsertionPosition(LLVMBuilderRef Builder); +void LLVMInsertIntoBuilder(LLVMBuilderRef Builder, LLVMValueRef Instr); +void LLVMDisposeBuilder(LLVMBuilderRef Builder); + +/* Terminators */ +LLVMValueRef LLVMBuildRetVoid(LLVMBuilderRef); +LLVMValueRef LLVMBuildRet(LLVMBuilderRef, LLVMValueRef V); +LLVMValueRef LLVMBuildBr(LLVMBuilderRef, LLVMBasicBlockRef Dest); +LLVMValueRef LLVMBuildCondBr(LLVMBuilderRef, LLVMValueRef If, + LLVMBasicBlockRef Then, LLVMBasicBlockRef Else); +LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef, LLVMValueRef V, + LLVMBasicBlockRef Else, unsigned NumCases); +LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn, + LLVMValueRef *Args, unsigned NumArgs, + LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch, + const char *Name); +LLVMValueRef LLVMBuildUnwind(LLVMBuilderRef); +LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef); + +/* Add a case to the switch instruction */ +void LLVMAddCase(LLVMValueRef Switch, LLVMValueRef OnVal, + LLVMBasicBlockRef Dest); + +/* Arithmetic */ +LLVMValueRef LLVMBuildAdd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildSub(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildMul(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildUDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildSDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildFDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildURem(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildSRem(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildFRem(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildShl(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildLShr(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildAShr(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildAnd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildOr(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildXor(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildNeg(LLVMBuilderRef, LLVMValueRef V, const char *Name); +LLVMValueRef LLVMBuildNot(LLVMBuilderRef, LLVMValueRef V, const char *Name); + +/* Memory */ +LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name); +LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef, LLVMTypeRef Ty, + LLVMValueRef Val, const char *Name); +LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name); +LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef, LLVMTypeRef Ty, + LLVMValueRef Val, const char *Name); +LLVMValueRef LLVMBuildFree(LLVMBuilderRef, LLVMValueRef PointerVal); +LLVMValueRef 
LLVMBuildLoad(LLVMBuilderRef, LLVMValueRef PointerVal, + const char *Name); +LLVMValueRef LLVMBuildStore(LLVMBuilderRef, LLVMValueRef Val, LLVMValueRef Ptr); +LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer, + LLVMValueRef *Indices, unsigned NumIndices, + const char *Name); + +/* Casts */ +LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildZExt(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildSExt(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildFPToUI(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildFPToSI(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildUIToFP(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildSIToFP(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildFPTrunc(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildFPExt(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildPtrToInt(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildIntToPtr(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); +LLVMValueRef LLVMBuildBitCast(LLVMBuilderRef, LLVMValueRef Val, + LLVMTypeRef DestTy, const char *Name); + +/* Comparisons */ +LLVMValueRef LLVMBuildICmp(LLVMBuilderRef, LLVMIntPredicate Op, + LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); +LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef, LLVMRealPredicate Op, + LLVMValueRef LHS, LLVMValueRef RHS, + const char *Name); + +/* Miscellaneous instructions */ +LLVMValueRef LLVMBuildPhi(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name); +LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn, + LLVMValueRef *Args, unsigned NumArgs, + const char *Name); +LLVMValueRef LLVMBuildSelect(LLVMBuilderRef, LLVMValueRef If, + LLVMValueRef Then, LLVMValueRef Else, + const char *Name); +LLVMValueRef LLVMBuildVAArg(LLVMBuilderRef, LLVMValueRef List, LLVMTypeRef Ty, + const char *Name); +LLVMValueRef LLVMBuildExtractElement(LLVMBuilderRef, LLVMValueRef VecVal, + LLVMValueRef Index, const char *Name); +LLVMValueRef LLVMBuildInsertElement(LLVMBuilderRef, LLVMValueRef VecVal, + LLVMValueRef EltVal, LLVMValueRef Index, + const char *Name); +LLVMValueRef LLVMBuildShuffleVector(LLVMBuilderRef, LLVMValueRef V1, + LLVMValueRef V2, LLVMValueRef Mask, + const char *Name); +LLVMValueRef LLVMBuildExtractValue(LLVMBuilderRef, LLVMValueRef AggVal, + unsigned Index, const char *Name); +LLVMValueRef LLVMBuildInsertValue(LLVMBuilderRef, LLVMValueRef AggVal, + LLVMValueRef EltVal, unsigned Index, + const char *Name); + + +/*===-- Module providers --------------------------------------------------===*/ + +/* Encapsulates the module M in a module provider, taking ownership of the + * module. + * See the constructor llvm::ExistingModuleProvider::ExistingModuleProvider. + */ +LLVMModuleProviderRef +LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M); + +/* Destroys the module provider MP as well as the contained module. + * See the destructor llvm::ModuleProvider::~ModuleProvider. 
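+ *
+ * A typical pairing (a sketch): wrap an existing module Mod so it can be
+ * handed to the JIT or to a function pass manager, then dispose the provider
+ * (and with it the module) when finished:
+ *
+ *   LLVMModuleProviderRef MP = LLVMCreateModuleProviderForExistingModule(Mod);
+ *   LLVMPassManagerRef FPM = LLVMCreateFunctionPassManager(MP);
+ *   ...
+ *   LLVMDisposePassManager(FPM);
+ *   LLVMDisposeModuleProvider(MP);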
+ */ +void LLVMDisposeModuleProvider(LLVMModuleProviderRef MP); + + +/*===-- Memory buffers ----------------------------------------------------===*/ + +int LLVMCreateMemoryBufferWithContentsOfFile(const char *Path, + LLVMMemoryBufferRef *OutMemBuf, + char **OutMessage); +int LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf, + char **OutMessage); +void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf); + + +/*===-- Pass Managers -----------------------------------------------------===*/ + +/** Constructs a new whole-module pass pipeline. This type of pipeline is + suitable for link-time optimization and whole-module transformations. + See llvm::PassManager::PassManager. */ +LLVMPassManagerRef LLVMCreatePassManager(void); + +/** Constructs a new function-by-function pass pipeline over the module + provider. It does not take ownership of the module provider. This type of + pipeline is suitable for code generation and JIT compilation tasks. + See llvm::FunctionPassManager::FunctionPassManager. */ +LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef MP); + +/** Initializes, executes on the provided module, and finalizes all of the + passes scheduled in the pass manager. Returns 1 if any of the passes + modified the module, 0 otherwise. See llvm::PassManager::run(Module&). */ +int LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M); + +/** Initializes all of the function passes scheduled in the function pass + manager. Returns 1 if any of the passes modified the module, 0 otherwise. + See llvm::FunctionPassManager::doInitialization. */ +int LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM); + +/** Executes all of the function passes scheduled in the function pass manager + on the provided function. Returns 1 if any of the passes modified the + function, 0 otherwise. + See llvm::FunctionPassManager::run(Function&). */ +int LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F); + +/** Finalizes all of the function passes scheduled in the function pass + manager. Returns 1 if any of the passes modified the module, 0 otherwise. + See llvm::FunctionPassManager::doFinalization. */ +int LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM); + +/** Frees the memory of a pass pipeline. For function pipelines, does not free + the module provider. + See llvm::PassManagerBase::~PassManagerBase.
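+
+    A typical function-pass pipeline over a module Mod wrapped in a module
+    provider MP (a sketch; the mem2reg pass is declared in
+    llvm-c/Transforms/Scalar.h):
+
+      LLVMPassManagerRef FPM = LLVMCreateFunctionPassManager(MP);
+      LLVMAddPromoteMemoryToRegisterPass(FPM);
+      LLVMInitializeFunctionPassManager(FPM);
+      for (LLVMValueRef F = LLVMGetFirstFunction(Mod); F;
+           F = LLVMGetNextFunction(F))
+        LLVMRunFunctionPassManager(FPM, F);
+      LLVMFinalizeFunctionPassManager(FPM);
+      LLVMDisposePassManager(FPM);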
*/ +void LLVMDisposePassManager(LLVMPassManagerRef PM); + + +#ifdef __cplusplus +} + +namespace llvm { + class ModuleProvider; + class MemoryBuffer; + class PassManagerBase; + + #define DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \ + inline ty *unwrap(ref P) { \ + return reinterpret_cast<ty*>(P); \ + } \ + \ + inline ref wrap(const ty *P) { \ + return reinterpret_cast<ref>(const_cast<ty*>(P)); \ + } + + #define DEFINE_ISA_CONVERSION_FUNCTIONS(ty, ref) \ + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \ + \ + template<typename T> \ + inline T *unwrap(ref P) { \ + return cast<T>(unwrap(P)); \ + } + + #define DEFINE_STDCXX_CONVERSION_FUNCTIONS(ty, ref) \ + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \ + \ + template<typename T> \ + inline T *unwrap(ref P) { \ + T *Q = dynamic_cast<T*>(unwrap(P)); \ + assert(Q && "Invalid cast!"); \ + return Q; \ + } + + DEFINE_ISA_CONVERSION_FUNCTIONS (Type, LLVMTypeRef ) + DEFINE_ISA_CONVERSION_FUNCTIONS (Value, LLVMValueRef ) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef ) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef ) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef ) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(PATypeHolder, LLVMTypeHandleRef ) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ModuleProvider, LLVMModuleProviderRef) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MemoryBuffer, LLVMMemoryBufferRef ) + DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBase, LLVMPassManagerRef ) + + #undef DEFINE_STDCXX_CONVERSION_FUNCTIONS + #undef DEFINE_ISA_CONVERSION_FUNCTIONS + #undef DEFINE_SIMPLE_CONVERSION_FUNCTIONS + + /* Specialized opaque type conversions. + */ + inline Type **unwrap(LLVMTypeRef* Tys) { + return reinterpret_cast<Type**>(Tys); + } + + inline LLVMTypeRef *wrap(const Type **Tys) { + return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys)); + } + + /* Specialized opaque value conversions. + */ + inline Value **unwrap(LLVMValueRef *Vals) { + return reinterpret_cast<Value**>(Vals); + } + + template<typename T> + inline T **unwrap(LLVMValueRef *Vals, unsigned Length) { + #if DEBUG + for (LLVMValueRef *I = Vals, E = Vals + Length; I != E; ++I) + cast<T>(*I); + #endif + return reinterpret_cast<T**>(Vals); + } + + inline LLVMValueRef *wrap(const Value **Vals) { + return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals)); + } +} + +#endif /* !defined(__cplusplus) */ + +#endif /* !defined(LLVM_C_CORE_H) */ diff --git a/include/llvm-c/ExecutionEngine.h b/include/llvm-c/ExecutionEngine.h new file mode 100644 index 0000000..a31dc82 --- /dev/null +++ b/include/llvm-c/ExecutionEngine.h @@ -0,0 +1,124 @@ +/*===-- llvm-c/ExecutionEngine.h - ExecutionEngine Lib C Iface --*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMExecutionEngine.o, which *| +|* implements various analyses of the LLVM IR. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. 
*| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_EXECUTIONENGINE_H +#define LLVM_C_EXECUTIONENGINE_H + +#include "llvm-c/Core.h" +#include "llvm-c/Target.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct LLVMOpaqueGenericValue *LLVMGenericValueRef; +typedef struct LLVMOpaqueExecutionEngine *LLVMExecutionEngineRef; + +/*===-- Operations on generic values --------------------------------------===*/ + +LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty, + unsigned long long N, + int IsSigned); + +LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P); + +LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef Ty, double N); + +unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef); + +unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenVal, + int IsSigned); + +void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal); + +double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal); + +void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal); + +/*===-- Operations on execution engines -----------------------------------===*/ + +int LLVMCreateExecutionEngine(LLVMExecutionEngineRef *OutEE, + LLVMModuleProviderRef MP, + char **OutError); + +int LLVMCreateInterpreter(LLVMExecutionEngineRef *OutInterp, + LLVMModuleProviderRef MP, + char **OutError); + +int LLVMCreateJITCompiler(LLVMExecutionEngineRef *OutJIT, + LLVMModuleProviderRef MP, + unsigned OptLevel, + char **OutError); + +void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE); + +void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE); + +void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE); + +int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F, + unsigned ArgC, const char * const *ArgV, + const char * const *EnvP); + +LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F, + unsigned NumArgs, + LLVMGenericValueRef *Args); + +void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F); + +void LLVMAddModuleProvider(LLVMExecutionEngineRef EE, LLVMModuleProviderRef MP); + +int LLVMRemoveModuleProvider(LLVMExecutionEngineRef EE, + LLVMModuleProviderRef MP, + LLVMModuleRef *OutMod, char **OutError); + +int LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name, + LLVMValueRef *OutFn); + +LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE); + +void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global, + void* Addr); + +void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global); + +#ifdef __cplusplus +} + +namespace llvm { + class GenericValue; + class ExecutionEngine; + + #define DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \ + inline ty *unwrap(ref P) { \ + return reinterpret_cast<ty*>(P); \ + } \ + \ + inline ref wrap(const ty *P) { \ + return reinterpret_cast<ref>(const_cast<ty*>(P)); \ + } + + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef ) + DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionEngine, LLVMExecutionEngineRef) + + #undef DEFINE_SIMPLE_CONVERSION_FUNCTIONS +} + +#endif /* defined(__cplusplus) */ + +#endif diff --git a/include/llvm-c/LinkTimeOptimizer.h b/include/llvm-c/LinkTimeOptimizer.h new file mode 100644 index 0000000..ccfdcee --- /dev/null +++ b/include/llvm-c/LinkTimeOptimizer.h @@ -0,0 +1,58 @@ +//===-- llvm/LinkTimeOptimizer.h - LTO Public C Interface -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is 
distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header provides a C API to use the LLVM link time optimization +// library. This is inteded to be used by linkers which are C-only in +// their implementation for performing LTO. +// +//===----------------------------------------------------------------------===// + +#ifndef __LTO_CAPI_H__ +#define __LTO_CAPI_H__ + +#ifdef __cplusplus +extern "C" { +#endif + + /// This provides a dummy type for pointers to the LTO object. + typedef void* llvm_lto_t; + + /// This provides a C-visible enumerator to manage status codes. + /// This should map exactly onto the C++ enumerator LTOStatus. + typedef enum llvm_lto_status { + LLVM_LTO_UNKNOWN, + LLVM_LTO_OPT_SUCCESS, + LLVM_LTO_READ_SUCCESS, + LLVM_LTO_READ_FAILURE, + LLVM_LTO_WRITE_FAILURE, + LLVM_LTO_NO_TARGET, + LLVM_LTO_NO_WORK, + LLVM_LTO_MODULE_MERGE_FAILURE, + LLVM_LTO_ASM_FAILURE, + + // Added C-specific error codes + LLVM_LTO_NULL_OBJECT + } llvm_lto_status_t; + + /// This provides C interface to initialize link time optimizer. This allows + /// linker to use dlopen() interface to dynamically load LinkTimeOptimizer. + /// extern "C" helps, because dlopen() interface uses name to find the symbol. + extern llvm_lto_t llvm_create_optimizer(void); + extern void llvm_destroy_optimizer(llvm_lto_t lto); + + extern llvm_lto_status_t llvm_read_object_file + (llvm_lto_t lto, const char* input_filename); + extern llvm_lto_status_t llvm_optimize_modules + (llvm_lto_t lto, const char* output_filename); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/llvm-c/Target.h b/include/llvm-c/Target.h new file mode 100644 index 0000000..5de5bc7 --- /dev/null +++ b/include/llvm-c/Target.h @@ -0,0 +1,131 @@ +/*===-- llvm-c/Target.h - Target Lib C Iface --------------------*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMTarget.a, which *| +|* implements target information. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_TARGET_H +#define LLVM_C_TARGET_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum { LLVMBigEndian, LLVMLittleEndian }; +typedef int LLVMByteOrdering; + +typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef; +typedef struct LLVMStructLayout *LLVMStructLayoutRef; + + +/*===-- Target Data -------------------------------------------------------===*/ + +/** Creates target data from a target layout string. + See the constructor llvm::TargetData::TargetData. */ +LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep); + +/** Adds target data information to a pass manager. This does not take ownership + of the target data. + See the method llvm::PassManagerBase::add. */ +void LLVMAddTargetData(LLVMTargetDataRef, LLVMPassManagerRef); + +/** Converts target data to a target layout string. The string must be disposed + with LLVMDisposeMessage. 
+ See the constructor llvm::TargetData::TargetData. */ +char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef); + +/** Returns the byte order of a target, either LLVMBigEndian or + LLVMLittleEndian. + See the method llvm::TargetData::isLittleEndian. */ +LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef); + +/** Returns the pointer size in bytes for a target. + See the method llvm::TargetData::getPointerSize. */ +unsigned LLVMPointerSize(LLVMTargetDataRef); + +/** Returns the integer type that is the same size as a pointer on a target. + See the method llvm::TargetData::getIntPtrType. */ +LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef); + +/** Computes the size of a type in bytes for a target. + See the method llvm::TargetData::getTypeSizeInBits. */ +unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef); + +/** Computes the storage size of a type in bytes for a target. + See the method llvm::TargetData::getTypeStoreSize. */ +unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef); + +/** Computes the ABI size of a type in bytes for a target. + See the method llvm::TargetData::getTypeAllocSize. */ +unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef); + +/** Computes the ABI alignment of a type in bytes for a target. + See the method llvm::TargetData::getTypeABISize. */ +unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef); + +/** Computes the call frame alignment of a type in bytes for a target. + See the method llvm::TargetData::getTypeABISize. */ +unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef); + +/** Computes the preferred alignment of a type in bytes for a target. + See the method llvm::TargetData::getTypeABISize. */ +unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef); + +/** Computes the preferred alignment of a global variable in bytes for a target. + See the method llvm::TargetData::getPreferredAlignment. */ +unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef, + LLVMValueRef GlobalVar); + +/** Computes the structure element that contains the byte offset for a target. + See the method llvm::StructLayout::getElementContainingOffset. */ +unsigned LLVMElementAtOffset(LLVMTargetDataRef, LLVMTypeRef StructTy, + unsigned long long Offset); + +/** Computes the byte offset of the indexed struct element for a target. + See the method llvm::StructLayout::getElementContainingOffset. */ +unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef, LLVMTypeRef StructTy, + unsigned Element); + +/** Struct layouts are speculatively cached. If a TargetDataRef is alive when + types are being refined and removed, this method must be called whenever a + struct type is removed to avoid a dangling pointer in this cache. + See the method llvm::TargetData::InvalidateStructLayoutInfo. */ +void LLVMInvalidateStructLayout(LLVMTargetDataRef, LLVMTypeRef StructTy); + +/** Deallocates a TargetData. + See the destructor llvm::TargetData::~TargetData. 
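+
+    A small usage sketch (the data layout string is illustrative):
+
+      LLVMTargetDataRef TD = LLVMCreateTargetData("e-p:32:32");
+      unsigned PointerBytes = LLVMPointerSize(TD);
+      char *Rep = LLVMCopyStringRepOfTargetData(TD);
+      LLVMDisposeMessage(Rep);
+      LLVMDisposeTargetData(TD);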
*/ +void LLVMDisposeTargetData(LLVMTargetDataRef); + + +#ifdef __cplusplus +} + +namespace llvm { + class TargetData; + + inline TargetData *unwrap(LLVMTargetDataRef P) { + return reinterpret_cast<TargetData*>(P); + } + + inline LLVMTargetDataRef wrap(const TargetData *P) { + return reinterpret_cast<LLVMTargetDataRef>(const_cast<TargetData*>(P)); + } +} + +#endif /* defined(__cplusplus) */ + +#endif diff --git a/include/llvm-c/Transforms/IPO.h b/include/llvm-c/Transforms/IPO.h new file mode 100644 index 0000000..9bc947f --- /dev/null +++ b/include/llvm-c/Transforms/IPO.h @@ -0,0 +1,70 @@ +/*===-- IPO.h - Interprocedural Transformations C Interface -----*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMIPO.a, which implements *| +|* various interprocedural transformations of the LLVM IR. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_TRANSFORMS_IPO_H +#define LLVM_C_TRANSFORMS_IPO_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** See llvm::createArgumentPromotionPass function. */ +void LLVMAddArgumentPromotionPass(LLVMPassManagerRef PM); + +/** See llvm::createConstantMergePass function. */ +void LLVMAddConstantMergePass(LLVMPassManagerRef PM); + +/** See llvm::createDeadArgEliminationPass function. */ +void LLVMAddDeadArgEliminationPass(LLVMPassManagerRef PM); + +/** See llvm::createDeadTypeEliminationPass function. */ +void LLVMAddDeadTypeEliminationPass(LLVMPassManagerRef PM); + +/** See llvm::createFunctionAttrsPass function. */ +void LLVMAddFunctionAttrsPass(LLVMPassManagerRef PM); + +/** See llvm::createFunctionInliningPass function. */ +void LLVMAddFunctionInliningPass(LLVMPassManagerRef PM); + +/** See llvm::createGlobalDCEPass function. */ +void LLVMAddGlobalDCEPass(LLVMPassManagerRef PM); + +/** See llvm::createGlobalOptimizerPass function. */ +void LLVMAddGlobalOptimizerPass(LLVMPassManagerRef PM); + +/** See llvm::createIPConstantPropagationPass function. */ +void LLVMAddIPConstantPropagationPass(LLVMPassManagerRef PM); + +/** See llvm::createLowerSetJmpPass function. */ +void LLVMAddLowerSetJmpPass(LLVMPassManagerRef PM); + +/** See llvm::createPruneEHPass function. */ +void LLVMAddPruneEHPass(LLVMPassManagerRef PM); + +/** See llvm::createRaiseAllocationsPass function. */ +void LLVMAddRaiseAllocationsPass(LLVMPassManagerRef PM); + +/** See llvm::createStripDeadPrototypesPass function. */ +void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM); + +/** See llvm::createStripSymbolsPass function. */ +void LLVMAddStripSymbolsPass(LLVMPassManagerRef PM); + +#ifdef __cplusplus +} +#endif /* defined(__cplusplus) */ + +#endif diff --git a/include/llvm-c/Transforms/Scalar.h b/include/llvm-c/Transforms/Scalar.h new file mode 100644 index 0000000..e52a1d1 --- /dev/null +++ b/include/llvm-c/Transforms/Scalar.h @@ -0,0 +1,101 @@ +/*===-- Scalar.h - Scalar Transformation Library C Interface ----*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. 
*| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMScalarOpts.a, which *| +|* implements various scalar transformations of the LLVM IR. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_TRANSFORMS_SCALAR_H +#define LLVM_C_TRANSFORMS_SCALAR_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** See llvm::createAggressiveDCEPass function. */ +void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM); + +/** See llvm::createCFGSimplificationPass function. */ +void LLVMAddCFGSimplificationPass(LLVMPassManagerRef PM); + +/** See llvm::createCondPropagationPass function. */ +void LLVMAddCondPropagationPass(LLVMPassManagerRef PM); + +/** See llvm::createDeadStoreEliminationPass function. */ +void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM); + +/** See llvm::createGVNPass function. */ +void LLVMAddGVNPass(LLVMPassManagerRef PM); + +/** See llvm::createIndVarSimplifyPass function. */ +void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM); + +/** See llvm::createInstructionCombiningPass function. */ +void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM); + +/** See llvm::createJumpThreadingPass function. */ +void LLVMAddJumpThreadingPass(LLVMPassManagerRef PM); + +/** See llvm::createLICMPass function. */ +void LLVMAddLICMPass(LLVMPassManagerRef PM); + +/** See llvm::createLoopDeletionPass function. */ +void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM); + +/** See llvm::createLoopIndexSplitPass function. */ +void LLVMAddLoopIndexSplitPass(LLVMPassManagerRef PM); + +/** See llvm::createLoopRotatePass function. */ +void LLVMAddLoopRotatePass(LLVMPassManagerRef PM); + +/** See llvm::createLoopUnrollPass function. */ +void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM); + +/** See llvm::createLoopUnswitchPass function. */ +void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM); + +/** See llvm::createMemCpyOptPass function. */ +void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM); + +/** See llvm::createPromoteMemoryToRegisterPass function. */ +void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM); + +/** See llvm::createReassociatePass function. */ +void LLVMAddReassociatePass(LLVMPassManagerRef PM); + +/** See llvm::createSCCPPass function. */ +void LLVMAddSCCPPass(LLVMPassManagerRef PM); + +/** See llvm::createScalarReplAggregatesPass function. */ +void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM); + +/** See llvm::createSimplifyLibCallsPass function. */ +void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM); + +/** See llvm::createTailCallEliminationPass function. */ +void LLVMAddTailCallEliminationPass(LLVMPassManagerRef PM); + +/** See llvm::createConstantPropagationPass function. */ +void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM); + +/** See llvm::demotePromoteMemoryToRegisterPass function. 
*/ +void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM); + +#ifdef __cplusplus +} +#endif /* defined(__cplusplus) */ + +#endif diff --git a/include/llvm-c/lto.h b/include/llvm-c/lto.h new file mode 100644 index 0000000..8198617 --- /dev/null +++ b/include/llvm-c/lto.h @@ -0,0 +1,250 @@ +/*===-- llvm-c/lto.h - LTO Public C Interface ---------------------*- C -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header provides public interface to an abstract link time optimization*| +|* library. LLVM provides an implementation of this interface for use with *| +|* llvm bitcode files. *| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LTO_H +#define LTO_H 1 + +#include <stdbool.h> +#include <stddef.h> + +typedef enum { + LTO_SYMBOL_ALIGNMENT_MASK = 0x0000001F, /* log2 of alignment */ + LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0, + LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0, + LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0, + LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080, + LTO_SYMBOL_DEFINITION_MASK = 0x00000700, + LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100, + LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200, + LTO_SYMBOL_DEFINITION_WEAK = 0x00000300, + LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400, + LTO_SYMBOL_DEFINITION_WEAKUNDEF = 0x00000500, + LTO_SYMBOL_SCOPE_MASK = 0x00003800, + LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800, + LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000, + LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000, + LTO_SYMBOL_SCOPE_DEFAULT = 0x00001800 +} lto_symbol_attributes; + +typedef enum { + LTO_DEBUG_MODEL_NONE = 0, + LTO_DEBUG_MODEL_DWARF = 1 +} lto_debug_model; + +typedef enum { + LTO_CODEGEN_PIC_MODEL_STATIC = 0, + LTO_CODEGEN_PIC_MODEL_DYNAMIC = 1, + LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC = 2 +} lto_codegen_model; + + +/** opaque reference to a loaded object module */ +typedef struct LTOModule* lto_module_t; + +/** opaque reference to a code generator */ +typedef struct LTOCodeGenerator* lto_code_gen_t; + + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Returns a printable string. + */ +extern const char* +lto_get_version(void); + + +/** + * Returns the last error string or NULL if last operation was sucessful. + */ +extern const char* +lto_get_error_message(void); + + +/** + * Checks if a file is a loadable object file. + */ +extern bool +lto_module_is_object_file(const char* path); + + +/** + * Checks if a file is a loadable object compiled for requested target. + */ +extern bool +lto_module_is_object_file_for_target(const char* path, + const char* target_triple_prefix); + + +/** + * Checks if a buffer is a loadable object file. + */ +extern bool +lto_module_is_object_file_in_memory(const void* mem, size_t length); + + +/** + * Checks if a buffer is a loadable object compiled for requested target. + */ +extern bool +lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length, + const char* target_triple_prefix); + + +/** + * Loads an object file from disk. + * Returns NULL on error (check lto_get_error_message() for details). + */ +extern lto_module_t +lto_module_create(const char* path); + + +/** + * Loads an object file from memory. + * Returns NULL on error (check lto_get_error_message() for details). 
+ */ +extern lto_module_t +lto_module_create_from_memory(const void* mem, size_t length); + + +/** + * Frees all memory internally allocated by the module. + * Upon return the lto_module_t is no longer valid. + */ +extern void +lto_module_dispose(lto_module_t mod); + + +/** + * Returns triple string which the object module was compiled under. + */ +extern const char* +lto_module_get_target_triple(lto_module_t mod); + + +/** + * Returns the number of symbols in the object module. + */ +extern unsigned int +lto_module_get_num_symbols(lto_module_t mod); + + +/** + * Returns the name of the ith symbol in the object module. + */ +extern const char* +lto_module_get_symbol_name(lto_module_t mod, unsigned int index); + + +/** + * Returns the attributes of the ith symbol in the object module. + */ +extern lto_symbol_attributes +lto_module_get_symbol_attribute(lto_module_t mod, unsigned int index); + + +/** + * Instantiates a code generator. + * Returns NULL on error (check lto_get_error_message() for details). + */ +extern lto_code_gen_t +lto_codegen_create(void); + + +/** + * Frees all code generator and all memory it internally allocated. + * Upon return the lto_code_gen_t is no longer valid. + */ +extern void +lto_codegen_dispose(lto_code_gen_t); + + + +/** + * Add an object module to the set of modules for which code will be generated. + * Returns true on error (check lto_get_error_message() for details). + */ +extern bool +lto_codegen_add_module(lto_code_gen_t cg, lto_module_t mod); + + + +/** + * Sets if debug info should be generated. + * Returns true on error (check lto_get_error_message() for details). + */ +extern bool +lto_codegen_set_debug_model(lto_code_gen_t cg, lto_debug_model); + + +/** + * Sets which PIC code model to generated. + * Returns true on error (check lto_get_error_message() for details). + */ +extern bool +lto_codegen_set_pic_model(lto_code_gen_t cg, lto_codegen_model); + + +/** + * Sets the location of the "gcc" to run. If not set, libLTO will search for + * "gcc" on the path. + */ +extern void +lto_codegen_set_gcc_path(lto_code_gen_t cg, const char* path); + + +/** + * Adds to a list of all global symbols that must exist in the final + * generated code. If a function is not listed, it might be + * inlined into every usage and optimized away. + */ +extern void +lto_codegen_add_must_preserve_symbol(lto_code_gen_t cg, const char* symbol); + + +/** + * Writes a new object file at the specified path that contains the + * merged contents of all modules added so far. + * Returns true on error (check lto_get_error_message() for details). + */ +extern bool +lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path); + + +/** + * Generates code for all added modules into one native object file. + * On sucess returns a pointer to a generated mach-o/ELF buffer and + * length set to the buffer size. The buffer is owned by the + * lto_code_gen_t and will be freed when lto_codegen_dispose() + * is called, or lto_codegen_compile() is called again. + * On failure, returns NULL (check lto_get_error_message() for details). + */ +extern const void* +lto_codegen_compile(lto_code_gen_t cg, size_t* length); + + +/** + * Sets options to help debug codegen bugs. 
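
To make the flow of this interface concrete, here is a hedged sketch of how a linker-like client might drive it end to end; it is not taken from the header, and output handling, module cleanup, and most error reporting are deliberately abbreviated.

#include "llvm-c/lto.h"
#include <stdio.h>

/* Illustrative only: merge two bitcode files into one native object buffer. */
static const void *linkTwoModules(const char *PathA, const char *PathB,
                                  size_t *Length) {
  const char *Inputs[] = { PathA, PathB };
  lto_code_gen_t CG = lto_codegen_create();

  for (unsigned i = 0; i != 2; ++i) {
    if (!lto_module_is_object_file(Inputs[i]))
      return NULL;                               /* not LLVM bitcode */
    lto_module_t M = lto_module_create(Inputs[i]);
    if (!M || lto_codegen_add_module(CG, M))     /* add_module: true == error */
      return NULL;

    /* Each module's symbol table can be inspected before code generation. */
    for (unsigned s = 0, e = lto_module_get_num_symbols(M); s != e; ++s)
      printf("%s\n", lto_module_get_symbol_name(M, s));
  }

  lto_codegen_set_pic_model(CG, LTO_CODEGEN_PIC_MODEL_STATIC);
  lto_codegen_add_must_preserve_symbol(CG, "main");

  /* Per the comments above, the buffer is owned by CG and lives until
     lto_codegen_dispose() or the next lto_codegen_compile() call. */
  return lto_codegen_compile(CG, Length);
}
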
+ */ +extern void +lto_codegen_debug_options(lto_code_gen_t cg, const char *); +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h new file mode 100644 index 0000000..928ecc0 --- /dev/null +++ b/include/llvm/ADT/APFloat.h @@ -0,0 +1,367 @@ +//== llvm/Support/APFloat.h - Arbitrary Precision Floating Point -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares a class to represent arbitrary precision floating +// point values and provide a variety of arithmetic operations on them. +// +//===----------------------------------------------------------------------===// + +/* A self-contained host- and target-independent arbitrary-precision + floating-point software implementation. It uses bignum integer + arithmetic as provided by static functions in the APInt class. + The library will work with bignum integers whose parts are any + unsigned type at least 16 bits wide, but 64 bits is recommended. + + Written for clarity rather than speed, in particular with a view + to use in the front-end of a cross compiler so that target + arithmetic can be correctly performed on the host. Performance + should nonetheless be reasonable, particularly for its intended + use. It may be useful as a base implementation for a run-time + library during development of a faster target-specific one. + + All 5 rounding modes in the IEEE-754R draft are handled correctly + for all implemented operations. Currently implemented operations + are add, subtract, multiply, divide, fused-multiply-add, + conversion-to-float, conversion-to-integer and + conversion-from-integer. New rounding modes (e.g. away from zero) + can be added with three or four lines of code. + + Four formats are built-in: IEEE single precision, double + precision, quadruple precision, and x87 80-bit extended double + (when operating with full extended precision). Adding a new + format that obeys IEEE semantics only requires adding two lines of + code: a declaration and definition of the format. + + All operations return the status of that operation as an exception + bit-mask, so multiple operations can be done consecutively with + their results or-ed together. The returned status can be useful + for compiler diagnostics; e.g., inexact, underflow and overflow + can be easily diagnosed on constant folding, and compiler + optimizers can determine what exceptions would be raised by + folding operations and optimize, or perhaps not optimize, + accordingly. + + At present, underflow tininess is detected after rounding; it + should be straight forward to add support for the before-rounding + case too. + + The library reads hexadecimal floating point numbers as per C99, + and correctly rounds if necessary according to the specified + rounding mode. Syntax is required to have been validated by the + caller. It also converts floating point numbers to hexadecimal + text as per the C99 %a and %A conversions. The output precision + (or alternatively the natural minimal precision) can be specified; + if the requested precision is less than the natural precision the + output is correctly rounded for the specified rounding mode. + + It also reads decimal floating point numbers and correctly rounds + according to the specified rounding mode. 
+ + Conversion to decimal text is not currently implemented. + + Non-zero finite numbers are represented internally as a sign bit, + a 16-bit signed exponent, and the significand as an array of + integer parts. After normalization of a number of precision P the + exponent is within the range of the format, and if the number is + not denormal the P-th bit of the significand is set as an explicit + integer bit. For denormals the most significant bit is shifted + right so that the exponent is maintained at the format's minimum, + so that the smallest denormal has just the least significant bit + of the significand set. The sign of zeroes and infinities is + significant; the exponent and significand of such numbers is not + stored, but has a known implicit (deterministic) value: 0 for the + significands, 0 for zero exponent, all 1 bits for infinity + exponent. For NaNs the sign and significand are deterministic, + although not really meaningful, and preserved in non-conversion + operations. The exponent is implicitly all 1 bits. + + TODO + ==== + + Some features that may or may not be worth adding: + + Binary to decimal conversion (hard). + + Optional ability to detect underflow tininess before rounding. + + New formats: x87 in single and double precision mode (IEEE apart + from extended exponent range) (hard). + + New operations: sqrt, IEEE remainder, C90 fmod, nextafter, + nexttoward. +*/ + +#ifndef LLVM_FLOAT_H +#define LLVM_FLOAT_H + +// APInt contains static functions implementing bignum arithmetic. +#include "llvm/ADT/APInt.h" + +namespace llvm { + + /* Exponents are stored as signed numbers. */ + typedef signed short exponent_t; + + struct fltSemantics; + + /* When bits of a floating point number are truncated, this enum is + used to indicate what fraction of the LSB those bits represented. + It essentially combines the roles of guard and sticky bits. */ + enum lostFraction { // Example of truncated bits: + lfExactlyZero, // 000000 + lfLessThanHalf, // 0xxxxx x's not all zero + lfExactlyHalf, // 100000 + lfMoreThanHalf // 1xxxxx x's not all zero + }; + + class APFloat { + public: + + /* We support the following floating point semantics. */ + static const fltSemantics IEEEsingle; + static const fltSemantics IEEEdouble; + static const fltSemantics IEEEquad; + static const fltSemantics PPCDoubleDouble; + static const fltSemantics x87DoubleExtended; + /* And this pseudo, used to construct APFloats that cannot + conflict with anything real. */ + static const fltSemantics Bogus; + + static unsigned int semanticsPrecision(const fltSemantics &); + + /* Floating point numbers have a four-state comparison relation. */ + enum cmpResult { + cmpLessThan, + cmpEqual, + cmpGreaterThan, + cmpUnordered + }; + + /* IEEE-754R gives five rounding modes. */ + enum roundingMode { + rmNearestTiesToEven, + rmTowardPositive, + rmTowardNegative, + rmTowardZero, + rmNearestTiesToAway + }; + + // Operation status. opUnderflow or opOverflow are always returned + // or-ed with opInexact. + enum opStatus { + opOK = 0x00, + opInvalidOp = 0x01, + opDivByZero = 0x02, + opOverflow = 0x04, + opUnderflow = 0x08, + opInexact = 0x10 + }; + + // Category of internally-represented number. + enum fltCategory { + fcInfinity, + fcNaN, + fcNormal, + fcZero + }; + + // Constructors. 
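
The or-able status design described above can be seen in a short, hypothetical constant-folding helper; it relies only on the constructors and arithmetic methods declared later in this class.

#include "llvm/ADT/APFloat.h"
using namespace llvm;

// Fold (a + b) * c in IEEE double, accumulating the exception bits of the
// whole sequence by or-ing the opStatus results together.
static bool foldWithDiagnostics(double a, double b, double c, APFloat &Out) {
  APFloat X(a), Y(b), Z(c);
  unsigned Status = APFloat::opOK;
  Status |= X.add(Y, APFloat::rmNearestTiesToEven);
  Status |= X.multiply(Z, APFloat::rmNearestTiesToEven);
  Out = X;
  // A front end could also diagnose opInexact or opUnderflow here.
  return (Status & (APFloat::opOverflow | APFloat::opInvalidOp)) == 0;
}
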
+ APFloat(const fltSemantics &, const char *); + APFloat(const fltSemantics &, integerPart); + APFloat(const fltSemantics &, fltCategory, bool negative, unsigned type=0); + explicit APFloat(double d); + explicit APFloat(float f); + explicit APFloat(const APInt &, bool isIEEE = false); + APFloat(const APFloat &); + ~APFloat(); + + // Convenience "constructors" + static APFloat getZero(const fltSemantics &Sem, bool Negative = false) { + return APFloat(Sem, fcZero, Negative); + } + static APFloat getInf(const fltSemantics &Sem, bool Negative = false) { + return APFloat(Sem, fcInfinity, Negative); + } + /// getNaN - Factory for QNaN values. + /// + /// \param Negative - True iff the NaN generated should be negative. + /// \param type - The unspecified fill bits for creating the NaN, 0 by + /// default. The value is truncated as necessary. + static APFloat getNaN(const fltSemantics &Sem, bool Negative = false, + unsigned type = 0) { + return APFloat(Sem, fcNaN, Negative, type); + } + + /// Profile - Used to insert APFloat objects, or objects that contain + /// APFloat objects, into FoldingSets. + void Profile(FoldingSetNodeID& NID) const; + + /// @brief Used by the Bitcode serializer to emit APInts to Bitcode. + void Emit(Serializer& S) const; + + /// @brief Used by the Bitcode deserializer to deserialize APInts. + static APFloat ReadVal(Deserializer& D); + + /* Arithmetic. */ + opStatus add(const APFloat &, roundingMode); + opStatus subtract(const APFloat &, roundingMode); + opStatus multiply(const APFloat &, roundingMode); + opStatus divide(const APFloat &, roundingMode); + /* IEEE remainder. */ + opStatus remainder(const APFloat &); + /* C fmod, or llvm frem. */ + opStatus mod(const APFloat &, roundingMode); + opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode); + + /* Sign operations. */ + void changeSign(); + void clearSign(); + void copySign(const APFloat &); + + /* Conversions. */ + opStatus convert(const fltSemantics &, roundingMode, bool *); + opStatus convertToInteger(integerPart *, unsigned int, bool, + roundingMode, bool *) const; + opStatus convertFromAPInt(const APInt &, + bool, roundingMode); + opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int, + bool, roundingMode); + opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int, + bool, roundingMode); + opStatus convertFromString(const char *, roundingMode); + APInt bitcastToAPInt() const; + double convertToDouble() const; + float convertToFloat() const; + + /* The definition of equality is not straightforward for floating point, + so we won't use operator==. Use one of the following, or write + whatever it is you really mean. */ + // bool operator==(const APFloat &) const; // DO NOT IMPLEMENT + + /* IEEE comparison with another floating point number (NaNs + compare unordered, 0==-0). */ + cmpResult compare(const APFloat &) const; + + /* Bitwise comparison for equality (QNaNs compare equal, 0!=-0). */ + bool bitwiseIsEqual(const APFloat &) const; + + /* Write out a hexadecimal representation of the floating point + value to DST, which must be of sufficient size, in the C99 form + [-]0xh.hhhhp[+-]d. Return the number of characters written, + excluding the terminating NUL. */ + unsigned int convertToHexString(char *dst, unsigned int hexDigits, + bool upperCase, roundingMode) const; + + /* Simple queries. 
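
A small sketch of the special-value factories and the two flavors of equality declared above (IEEE comparison versus bitwise comparison); the expected results follow from the comments on compare and bitwiseIsEqual.

#include "llvm/ADT/APFloat.h"
using namespace llvm;

static void specialValues() {
  APFloat PosZero = APFloat::getZero(APFloat::IEEEdouble);
  APFloat NegZero = APFloat::getZero(APFloat::IEEEdouble, /*Negative=*/true);
  APFloat QNaN    = APFloat::getNaN(APFloat::IEEEdouble);

  bool IEEEEqual = PosZero.compare(NegZero) == APFloat::cmpEqual;    // true: 0 == -0
  bool BitsEqual = PosZero.bitwiseIsEqual(NegZero);                  // false: 0 != -0 bitwise
  bool Unordered = QNaN.compare(PosZero) == APFloat::cmpUnordered;   // true: NaNs unordered
  (void)IEEEEqual; (void)BitsEqual; (void)Unordered;
}
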
*/ + fltCategory getCategory() const { return category; } + const fltSemantics &getSemantics() const { return *semantics; } + bool isZero() const { return category == fcZero; } + bool isNonZero() const { return category != fcZero; } + bool isNaN() const { return category == fcNaN; } + bool isInfinity() const { return category == fcInfinity; } + bool isNegative() const { return sign; } + bool isPosZero() const { return isZero() && !isNegative(); } + bool isNegZero() const { return isZero() && isNegative(); } + + APFloat& operator=(const APFloat &); + + /* Return an arbitrary integer value usable for hashing. */ + uint32_t getHashValue() const; + + private: + + /* Trivial queries. */ + integerPart *significandParts(); + const integerPart *significandParts() const; + unsigned int partCount() const; + + /* Significand operations. */ + integerPart addSignificand(const APFloat &); + integerPart subtractSignificand(const APFloat &, integerPart); + lostFraction addOrSubtractSignificand(const APFloat &, bool subtract); + lostFraction multiplySignificand(const APFloat &, const APFloat *); + lostFraction divideSignificand(const APFloat &); + void incrementSignificand(); + void initialize(const fltSemantics *); + void shiftSignificandLeft(unsigned int); + lostFraction shiftSignificandRight(unsigned int); + unsigned int significandLSB() const; + unsigned int significandMSB() const; + void zeroSignificand(); + + /* Arithmetic on special values. */ + opStatus addOrSubtractSpecials(const APFloat &, bool subtract); + opStatus divideSpecials(const APFloat &); + opStatus multiplySpecials(const APFloat &); + opStatus modSpecials(const APFloat &); + + /* Miscellany. */ + void makeNaN(unsigned = 0); + opStatus normalize(roundingMode, lostFraction); + opStatus addOrSubtract(const APFloat &, roundingMode, bool subtract); + cmpResult compareAbsoluteValue(const APFloat &) const; + opStatus handleOverflow(roundingMode); + bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const; + opStatus convertToSignExtendedInteger(integerPart *, unsigned int, bool, + roundingMode, bool *) const; + opStatus convertFromUnsignedParts(const integerPart *, unsigned int, + roundingMode); + opStatus convertFromHexadecimalString(const char *, roundingMode); + opStatus convertFromDecimalString (const char *, roundingMode); + char *convertNormalToHexString(char *, unsigned int, bool, + roundingMode) const; + opStatus roundSignificandWithExponent(const integerPart *, unsigned int, + int, roundingMode); + + APInt convertFloatAPFloatToAPInt() const; + APInt convertDoubleAPFloatToAPInt() const; + APInt convertF80LongDoubleAPFloatToAPInt() const; + APInt convertPPCDoubleDoubleAPFloatToAPInt() const; + void initFromAPInt(const APInt& api, bool isIEEE = false); + void initFromFloatAPInt(const APInt& api); + void initFromDoubleAPInt(const APInt& api); + void initFromF80LongDoubleAPInt(const APInt& api); + void initFromPPCDoubleDoubleAPInt(const APInt& api); + + void assign(const APFloat &); + void copySignificand(const APFloat &); + void freeSignificand(); + + /* What kind of semantics does this value obey? */ + const fltSemantics *semantics; + + /* Significand - the fraction with an explicit integer bit. Must be + at least one bit wider than the target precision. */ + union Significand + { + integerPart part; + integerPart *parts; + } significand; + + /* The exponent - a signed number. */ + exponent_t exponent; + + /* What kind of floating point number this is. 
*/ + /* Only 2 bits are required, but VisualStudio incorrectly sign extends + it. Using the extra bit keeps it from failing under VisualStudio */ + fltCategory category: 3; + + /* The sign bit of this number. */ + unsigned int sign: 1; + + /* For PPCDoubleDouble, we have a second exponent and sign (the second + significand is appended to the first one, although it would be wrong to + regard these as a single number for arithmetic purposes). These fields + are not meaningful for any other type. */ + exponent_t exponent2 : 11; + unsigned int sign2: 1; + }; +} /* namespace llvm */ + +#endif /* LLVM_FLOAT_H */ diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h new file mode 100644 index 0000000..63bf4f6 --- /dev/null +++ b/include/llvm/ADT/APInt.h @@ -0,0 +1,1612 @@ +//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a class to represent arbitrary precision integral +// constant values and operations on them. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_APINT_H +#define LLVM_APINT_H + +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/MathExtras.h" +#include <cassert> +#include <climits> +#include <cstring> +#include <string> + +namespace llvm { + class Serializer; + class Deserializer; + class FoldingSetNodeID; + class raw_ostream; + + template<typename T> + class SmallVectorImpl; + + /* An unsigned host type used as a single part of a multi-part + bignum. */ + typedef uint64_t integerPart; + + const unsigned int host_char_bit = 8; + const unsigned int integerPartWidth = host_char_bit * + static_cast<unsigned int>(sizeof(integerPart)); + +//===----------------------------------------------------------------------===// +// APInt Class +//===----------------------------------------------------------------------===// + +/// APInt - This class represents arbitrary precision constant integral values. +/// It is a functional replacement for common case unsigned integer type like +/// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width +/// integer sizes and large integer value types such as 3-bits, 15-bits, or more +/// than 64-bits of precision. APInt provides a variety of arithmetic operators +/// and methods to manipulate integer values of any bit-width. It supports both +/// the typical integer arithmetic and comparison operations as well as bitwise +/// manipulation. +/// +/// The class has several invariants worth noting: +/// * All bit, byte, and word positions are zero-based. +/// * Once the bit width is set, it doesn't change except by the Truncate, +/// SignExtend, or ZeroExtend operations. +/// * All binary operators must be on APInt instances of the same bit width. +/// Attempting to use these operators on instances with different bit +/// widths will yield an assertion. +/// * The value is stored canonically as an unsigned value. For operations +/// where it makes a difference, there are both signed and unsigned variants +/// of the operation. For example, sdiv and udiv. However, because the bit +/// widths must be the same, operations such as Mul and Add produce the same +/// results regardless of whether the values are interpreted as signed or +/// not. 
+/// * In general, the class tries to follow the style of computation that LLVM +/// uses in its IR. This simplifies its use for LLVM. +/// +/// @brief Class for arbitrary precision integers. +class APInt { + unsigned BitWidth; ///< The number of bits in this APInt. + + /// This union is used to store the integer value. When the + /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. + union { + uint64_t VAL; ///< Used to store the <= 64 bits integer value. + uint64_t *pVal; ///< Used to store the >64 bits integer value. + }; + + /// This enum is used to hold the constants we needed for APInt. + enum { + /// Bits in a word + APINT_BITS_PER_WORD = static_cast<unsigned int>(sizeof(uint64_t)) * + CHAR_BIT, + /// Byte size of a word + APINT_WORD_SIZE = static_cast<unsigned int>(sizeof(uint64_t)) + }; + + /// This constructor is used only internally for speed of construction of + /// temporaries. It is unsafe for general use so it is not public. + /// @brief Fast internal constructor + APInt(uint64_t* val, unsigned bits) : BitWidth(bits), pVal(val) { } + + /// @returns true if the number of bits <= 64, false otherwise. + /// @brief Determine if this APInt just has one word to store value. + bool isSingleWord() const { + return BitWidth <= APINT_BITS_PER_WORD; + } + + /// @returns the word position for the specified bit position. + /// @brief Determine which word a bit is in. + static unsigned whichWord(unsigned bitPosition) { + return bitPosition / APINT_BITS_PER_WORD; + } + + /// @returns the bit position in a word for the specified bit position + /// in the APInt. + /// @brief Determine which bit in a word a bit is in. + static unsigned whichBit(unsigned bitPosition) { + return bitPosition % APINT_BITS_PER_WORD; + } + + /// This method generates and returns a uint64_t (word) mask for a single + /// bit at a specific bit position. This is used to mask the bit in the + /// corresponding word. + /// @returns a uint64_t with only bit at "whichBit(bitPosition)" set + /// @brief Get a single bit mask. + static uint64_t maskBit(unsigned bitPosition) { + return 1ULL << whichBit(bitPosition); + } + + /// This method is used internally to clear the to "N" bits in the high order + /// word that are not used by the APInt. This is needed after the most + /// significant word is assigned a value to ensure that those bits are + /// zero'd out. + /// @brief Clear unused high order bits + APInt& clearUnusedBits() { + // Compute how many bits are used in the final word + unsigned wordBits = BitWidth % APINT_BITS_PER_WORD; + if (wordBits == 0) + // If all bits are used, we want to leave the value alone. This also + // avoids the undefined behavior of >> when the shift is the same size as + // the word size (64). + return *this; + + // Mask out the high bits. + uint64_t mask = ~uint64_t(0ULL) >> (APINT_BITS_PER_WORD - wordBits); + if (isSingleWord()) + VAL &= mask; + else + pVal[getNumWords() - 1] &= mask; + return *this; + } + + /// @returns the corresponding word for the specified bit position. + /// @brief Get the word corresponding to a bit position + uint64_t getWord(unsigned bitPosition) const { + return isSingleWord() ? VAL : pVal[whichWord(bitPosition)]; + } + + /// This is used by the constructors that take string arguments. + /// @brief Convert a char array into an APInt + void fromString(unsigned numBits, const char *strStart, unsigned slen, + uint8_t radix); + + /// This is used by the toString method to divide by the radix. 
It simply + /// provides a more convenient form of divide for internal use since KnuthDiv + /// has specific constraints on its inputs. If those constraints are not met + /// then it provides a simpler form of divide. + /// @brief An internal division function for dividing APInts. + static void divide(const APInt LHS, unsigned lhsWords, + const APInt &RHS, unsigned rhsWords, + APInt *Quotient, APInt *Remainder); + + /// out-of-line slow case for inline constructor + void initSlowCase(unsigned numBits, uint64_t val, bool isSigned); + + /// out-of-line slow case for inline copy constructor + void initSlowCase(const APInt& that); + + /// out-of-line slow case for shl + APInt shlSlowCase(unsigned shiftAmt) const; + + /// out-of-line slow case for operator& + APInt AndSlowCase(const APInt& RHS) const; + + /// out-of-line slow case for operator| + APInt OrSlowCase(const APInt& RHS) const; + + /// out-of-line slow case for operator^ + APInt XorSlowCase(const APInt& RHS) const; + + /// out-of-line slow case for operator= + APInt& AssignSlowCase(const APInt& RHS); + + /// out-of-line slow case for operator== + bool EqualSlowCase(const APInt& RHS) const; + + /// out-of-line slow case for operator== + bool EqualSlowCase(uint64_t Val) const; + + /// out-of-line slow case for countLeadingZeros + unsigned countLeadingZerosSlowCase() const; + + /// out-of-line slow case for countTrailingOnes + unsigned countTrailingOnesSlowCase() const; + + /// out-of-line slow case for countPopulation + unsigned countPopulationSlowCase() const; + +public: + /// @name Constructors + /// @{ + /// If isSigned is true then val is treated as if it were a signed value + /// (i.e. as an int64_t) and the appropriate sign extension to the bit width + /// will be done. Otherwise, no sign extension occurs (high order bits beyond + /// the range of val are zero filled). + /// @param numBits the bit width of the constructed APInt + /// @param val the initial value of the APInt + /// @param isSigned how to treat signedness of val + /// @brief Create a new APInt of numBits width, initialized as val. + APInt(unsigned numBits, uint64_t val, bool isSigned = false) + : BitWidth(numBits), VAL(0) { + assert(BitWidth && "bitwidth too small"); + if (isSingleWord()) + VAL = val; + else + initSlowCase(numBits, val, isSigned); + clearUnusedBits(); + } + + /// Note that numWords can be smaller or larger than the corresponding bit + /// width but any extraneous bits will be dropped. + /// @param numBits the bit width of the constructed APInt + /// @param numWords the number of words in bigVal + /// @param bigVal a sequence of words to form the initial value of the APInt + /// @brief Construct an APInt of numBits width, initialized as bigVal[]. + APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]); + + /// This constructor interprets the slen characters starting at StrStart as + /// a string in the given radix. The interpretation stops when the first + /// character that is not suitable for the radix is encountered. Acceptable + /// radix values are 2, 8, 10 and 16. It is an error for the value implied by + /// the string to require more bits than numBits. + /// @param numBits the bit width of the constructed APInt + /// @param strStart the start of the string to be interpreted + /// @param slen the maximum number of characters to interpret + /// @param radix the radix to use for the conversion + /// @brief Construct an APInt from a string representation. 
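
A brief, illustrative sketch of the constructors documented above, together with the fixed-width wrap-around behaviour and the two ways of reading the same bits back out; the getZExtValue/getSExtValue accessors used here are declared further down in this class.

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void construction() {
  APInt A(8, 200);                       // bit pattern 0xC8
  APInt B(8, -1, /*isSigned=*/true);     // sign extended: 0xFF
  APInt H(16, "beef", 4, 16);            // from a hex string: 0xBEEF

  APInt Sum = A + APInt(8, 56);          // wraps mod 2^8 to 0
  uint64_t U = A.getZExtValue();         // 200
  int64_t  S = A.getSExtValue();         // -56: same bits, signed reading
  (void)B; (void)H; (void)Sum; (void)U; (void)S;
}
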
+ APInt(unsigned numBits, const char strStart[], unsigned slen, uint8_t radix); + + /// Simply makes *this a copy of that. + /// @brief Copy Constructor. + APInt(const APInt& that) + : BitWidth(that.BitWidth), VAL(0) { + assert(BitWidth && "bitwidth too small"); + if (isSingleWord()) + VAL = that.VAL; + else + initSlowCase(that); + } + + /// @brief Destructor. + ~APInt() { + if (!isSingleWord()) + delete [] pVal; + } + + /// Default constructor that creates an uninitialized APInt. This is useful + /// for object deserialization (pair this with the static method Read). + explicit APInt() : BitWidth(1) {} + + /// Profile - Used to insert APInt objects, or objects that contain APInt + /// objects, into FoldingSets. + void Profile(FoldingSetNodeID& id) const; + + /// @brief Used by the Bitcode serializer to emit APInts to Bitcode. + void Emit(Serializer& S) const; + + /// @brief Used by the Bitcode deserializer to deserialize APInts. + void Read(Deserializer& D); + + /// @} + /// @name Value Tests + /// @{ + /// This tests the high bit of this APInt to determine if it is set. + /// @returns true if this APInt is negative, false otherwise + /// @brief Determine sign of this APInt. + bool isNegative() const { + return (*this)[BitWidth - 1]; + } + + /// This tests the high bit of the APInt to determine if it is unset. + /// @brief Determine if this APInt Value is non-negative (>= 0) + bool isNonNegative() const { + return !isNegative(); + } + + /// This tests if the value of this APInt is positive (> 0). Note + /// that 0 is not a positive value. + /// @returns true if this APInt is positive. + /// @brief Determine if this APInt Value is positive. + bool isStrictlyPositive() const { + return isNonNegative() && (*this) != 0; + } + + /// This checks to see if the value has all bits of the APInt are set or not. + /// @brief Determine if all bits are set + bool isAllOnesValue() const { + return countPopulation() == BitWidth; + } + + /// This checks to see if the value of this APInt is the maximum unsigned + /// value for the APInt's bit width. + /// @brief Determine if this is the largest unsigned value. + bool isMaxValue() const { + return countPopulation() == BitWidth; + } + + /// This checks to see if the value of this APInt is the maximum signed + /// value for the APInt's bit width. + /// @brief Determine if this is the largest signed value. + bool isMaxSignedValue() const { + return BitWidth == 1 ? VAL == 0 : + !isNegative() && countPopulation() == BitWidth - 1; + } + + /// This checks to see if the value of this APInt is the minimum unsigned + /// value for the APInt's bit width. + /// @brief Determine if this is the smallest unsigned value. + bool isMinValue() const { + return countPopulation() == 0; + } + + /// This checks to see if the value of this APInt is the minimum signed + /// value for the APInt's bit width. + /// @brief Determine if this is the smallest signed value. + bool isMinSignedValue() const { + return BitWidth == 1 ? VAL == 1 : + isNegative() && countPopulation() == 1; + } + + /// @brief Check if this APInt has an N-bits unsigned integer value. + bool isIntN(unsigned N) const { + assert(N && "N == 0 ???"); + if (N >= getBitWidth()) + return true; + + if (isSingleWord()) + return VAL == (VAL & (~0ULL >> (64 - N))); + APInt Tmp(N, getNumWords(), pVal); + Tmp.zext(getBitWidth()); + return Tmp == (*this); + } + + /// @brief Check if this APInt has an N-bits signed integer value. 
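
For example (illustrative only), the same 32-bit value can fit in 8 bits when read as unsigned while needing 9 bits as a signed value:

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void fitsTests() {
  APInt V(32, 255);
  bool FitsU8 = V.isIntN(8);        // true:  255 fits in 8 unsigned bits
  bool FitsS8 = V.isSignedIntN(8);  // false: needs 9 bits as a signed value
  (void)FitsU8; (void)FitsS8;
}
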
+ bool isSignedIntN(unsigned N) const { + assert(N && "N == 0 ???"); + return getMinSignedBits() <= N; + } + + /// @returns true if the argument APInt value is a power of two > 0. + bool isPowerOf2() const; + + /// isSignBit - Return true if this is the value returned by getSignBit. + bool isSignBit() const { return isMinSignedValue(); } + + /// This converts the APInt to a boolean value as a test against zero. + /// @brief Boolean conversion function. + bool getBoolValue() const { + return *this != 0; + } + + /// getLimitedValue - If this value is smaller than the specified limit, + /// return it, otherwise return the limit value. This causes the value + /// to saturate to the limit. + uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const { + return (getActiveBits() > 64 || getZExtValue() > Limit) ? + Limit : getZExtValue(); + } + + /// @} + /// @name Value Generators + /// @{ + /// @brief Gets maximum unsigned value of APInt for specific bit width. + static APInt getMaxValue(unsigned numBits) { + return APInt(numBits, 0).set(); + } + + /// @brief Gets maximum signed value of APInt for a specific bit width. + static APInt getSignedMaxValue(unsigned numBits) { + return APInt(numBits, 0).set().clear(numBits - 1); + } + + /// @brief Gets minimum unsigned value of APInt for a specific bit width. + static APInt getMinValue(unsigned numBits) { + return APInt(numBits, 0); + } + + /// @brief Gets minimum signed value of APInt for a specific bit width. + static APInt getSignedMinValue(unsigned numBits) { + return APInt(numBits, 0).set(numBits - 1); + } + + /// getSignBit - This is just a wrapper function of getSignedMinValue(), and + /// it helps code readability when we want to get a SignBit. + /// @brief Get the SignBit for a specific bit width. + static APInt getSignBit(unsigned BitWidth) { + return getSignedMinValue(BitWidth); + } + + /// @returns the all-ones value for an APInt of the specified bit-width. + /// @brief Get the all-ones value. + static APInt getAllOnesValue(unsigned numBits) { + return APInt(numBits, 0).set(); + } + + /// @returns the '0' value for an APInt of the specified bit-width. + /// @brief Get the '0' value. + static APInt getNullValue(unsigned numBits) { + return APInt(numBits, 0); + } + + /// Get an APInt with the same BitWidth as this APInt, just zero mask + /// the low bits and right shift to the least significant bit. + /// @returns the high "numBits" bits of this APInt. + APInt getHiBits(unsigned numBits) const; + + /// Get an APInt with the same BitWidth as this APInt, just zero mask + /// the high bits. + /// @returns the low "numBits" bits of this APInt. + APInt getLoBits(unsigned numBits) const; + + /// Constructs an APInt value that has a contiguous range of bits set. The + /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other + /// bits will be zero. For example, with parameters(32, 0, 16) you would get + /// 0x0000FFFF. If hiBit is less than loBit then the set bits "wrap". For + /// example, with parameters (32, 28, 4), you would get 0xF000000F. + /// @param numBits the intended bit width of the result + /// @param loBit the index of the lowest bit set. + /// @param hiBit the index of the highest bit set. + /// @returns An APInt value with the requested bits set. + /// @brief Get a value with a block of bits set. 
+ static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) { + assert(hiBit <= numBits && "hiBit out of range"); + assert(loBit < numBits && "loBit out of range"); + if (hiBit < loBit) + return getLowBitsSet(numBits, hiBit) | + getHighBitsSet(numBits, numBits-loBit); + return getLowBitsSet(numBits, hiBit-loBit).shl(loBit); + } + + /// Constructs an APInt value that has the top hiBitsSet bits set. + /// @param numBits the bitwidth of the result + /// @param hiBitsSet the number of high-order bits set in the result. + /// @brief Get a value with high bits set + static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) { + assert(hiBitsSet <= numBits && "Too many bits to set!"); + // Handle a degenerate case, to avoid shifting by word size + if (hiBitsSet == 0) + return APInt(numBits, 0); + unsigned shiftAmt = numBits - hiBitsSet; + // For small values, return quickly + if (numBits <= APINT_BITS_PER_WORD) + return APInt(numBits, ~0ULL << shiftAmt); + return (~APInt(numBits, 0)).shl(shiftAmt); + } + + /// Constructs an APInt value that has the bottom loBitsSet bits set. + /// @param numBits the bitwidth of the result + /// @param loBitsSet the number of low-order bits set in the result. + /// @brief Get a value with low bits set + static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) { + assert(loBitsSet <= numBits && "Too many bits to set!"); + // Handle a degenerate case, to avoid shifting by word size + if (loBitsSet == 0) + return APInt(numBits, 0); + if (loBitsSet == APINT_BITS_PER_WORD) + return APInt(numBits, -1ULL); + // For small values, return quickly. + if (numBits < APINT_BITS_PER_WORD) + return APInt(numBits, (1ULL << loBitsSet) - 1); + return (~APInt(numBits, 0)).lshr(numBits - loBitsSet); + } + + /// The hash value is computed as the sum of the words and the bit width. + /// @returns A hash value computed from the sum of the APInt words. + /// @brief Get a hash value based on this APInt + uint64_t getHashValue() const; + + /// This function returns a pointer to the internal storage of the APInt. + /// This is useful for writing out the APInt in binary form without any + /// conversions. + const uint64_t* getRawData() const { + if (isSingleWord()) + return &VAL; + return &pVal[0]; + } + + /// @} + /// @name Unary Operators + /// @{ + /// @returns a new APInt value representing *this incremented by one + /// @brief Postfix increment operator. + const APInt operator++(int) { + APInt API(*this); + ++(*this); + return API; + } + + /// @returns *this incremented by one + /// @brief Prefix increment operator. + APInt& operator++(); + + /// @returns a new APInt representing *this decremented by one. + /// @brief Postfix decrement operator. + const APInt operator--(int) { + APInt API(*this); + --(*this); + return API; + } + + /// @returns *this decremented by one. + /// @brief Prefix decrement operator. + APInt& operator--(); + + /// Performs a bitwise complement operation on this APInt. + /// @returns an APInt that is the bitwise complement of *this + /// @brief Unary bitwise complement operator. + APInt operator~() const { + APInt Result(*this); + Result.flip(); + return Result; + } + + /// Negates *this using two's complement logic. + /// @returns An APInt value representing the negation of *this. + /// @brief Unary negation operator + APInt operator-() const { + return APInt(BitWidth, 0) - (*this); + } + + /// Performs logical negation operation on this APInt. + /// @returns true if *this is zero, false otherwise. 
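
A quick sketch of the mask generators above; the expected patterns for getBitsSet come straight from its documentation (loBit inclusive, hiBit exclusive, wrapping when hiBit is less than loBit).

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void maskExamples() {
  APInt Low16   = APInt::getLowBitsSet(32, 16);   // 0x0000FFFF
  APInt High4   = APInt::getHighBitsSet(32, 4);   // 0xF0000000
  APInt Plain   = APInt::getBitsSet(32, 0, 16);   // 0x0000FFFF
  APInt Wrapped = APInt::getBitsSet(32, 28, 4);   // 0xF000000F (wraps)
  APInt AllOnes = APInt::getAllOnesValue(32);     // 0xFFFFFFFF
  (void)Low16; (void)High4; (void)Plain; (void)Wrapped; (void)AllOnes;
}
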
+ /// @brief Logical negation operator. + bool operator!() const; + + /// @} + /// @name Assignment Operators + /// @{ + /// @returns *this after assignment of RHS. + /// @brief Copy assignment operator. + APInt& operator=(const APInt& RHS) { + // If the bitwidths are the same, we can avoid mucking with memory + if (isSingleWord() && RHS.isSingleWord()) { + VAL = RHS.VAL; + BitWidth = RHS.BitWidth; + return clearUnusedBits(); + } + + return AssignSlowCase(RHS); + } + + /// The RHS value is assigned to *this. If the significant bits in RHS exceed + /// the bit width, the excess bits are truncated. If the bit width is larger + /// than 64, the value is zero filled in the unspecified high order bits. + /// @returns *this after assignment of RHS value. + /// @brief Assignment operator. + APInt& operator=(uint64_t RHS); + + /// Performs a bitwise AND operation on this APInt and RHS. The result is + /// assigned to *this. + /// @returns *this after ANDing with RHS. + /// @brief Bitwise AND assignment operator. + APInt& operator&=(const APInt& RHS); + + /// Performs a bitwise OR operation on this APInt and RHS. The result is + /// assigned *this; + /// @returns *this after ORing with RHS. + /// @brief Bitwise OR assignment operator. + APInt& operator|=(const APInt& RHS); + + /// Performs a bitwise XOR operation on this APInt and RHS. The result is + /// assigned to *this. + /// @returns *this after XORing with RHS. + /// @brief Bitwise XOR assignment operator. + APInt& operator^=(const APInt& RHS); + + /// Multiplies this APInt by RHS and assigns the result to *this. + /// @returns *this + /// @brief Multiplication assignment operator. + APInt& operator*=(const APInt& RHS); + + /// Adds RHS to *this and assigns the result to *this. + /// @returns *this + /// @brief Addition assignment operator. + APInt& operator+=(const APInt& RHS); + + /// Subtracts RHS from *this and assigns the result to *this. + /// @returns *this + /// @brief Subtraction assignment operator. + APInt& operator-=(const APInt& RHS); + + /// Shifts *this left by shiftAmt and assigns the result to *this. + /// @returns *this after shifting left by shiftAmt + /// @brief Left-shift assignment function. + APInt& operator<<=(unsigned shiftAmt) { + *this = shl(shiftAmt); + return *this; + } + + /// @} + /// @name Binary Operators + /// @{ + /// Performs a bitwise AND operation on *this and RHS. + /// @returns An APInt value representing the bitwise AND of *this and RHS. + /// @brief Bitwise AND operator. + APInt operator&(const APInt& RHS) const { + assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); + if (isSingleWord()) + return APInt(getBitWidth(), VAL & RHS.VAL); + return AndSlowCase(RHS); + } + APInt And(const APInt& RHS) const { + return this->operator&(RHS); + } + + /// Performs a bitwise OR operation on *this and RHS. + /// @returns An APInt value representing the bitwise OR of *this and RHS. + /// @brief Bitwise OR operator. + APInt operator|(const APInt& RHS) const { + assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); + if (isSingleWord()) + return APInt(getBitWidth(), VAL | RHS.VAL); + return OrSlowCase(RHS); + } + APInt Or(const APInt& RHS) const { + return this->operator|(RHS); + } + + /// Performs a bitwise XOR operation on *this and RHS. + /// @returns An APInt value representing the bitwise XOR of *this and RHS. + /// @brief Bitwise XOR operator. 
+ APInt operator^(const APInt& RHS) const { + assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); + if (isSingleWord()) + return APInt(BitWidth, VAL ^ RHS.VAL); + return XorSlowCase(RHS); + } + APInt Xor(const APInt& RHS) const { + return this->operator^(RHS); + } + + /// Multiplies this APInt by RHS and returns the result. + /// @brief Multiplication operator. + APInt operator*(const APInt& RHS) const; + + /// Adds RHS to this APInt and returns the result. + /// @brief Addition operator. + APInt operator+(const APInt& RHS) const; + APInt operator+(uint64_t RHS) const { + return (*this) + APInt(BitWidth, RHS); + } + + /// Subtracts RHS from this APInt and returns the result. + /// @brief Subtraction operator. + APInt operator-(const APInt& RHS) const; + APInt operator-(uint64_t RHS) const { + return (*this) - APInt(BitWidth, RHS); + } + + APInt operator<<(unsigned Bits) const { + return shl(Bits); + } + + APInt operator<<(const APInt &Bits) const { + return shl(Bits); + } + + /// Arithmetic right-shift this APInt by shiftAmt. + /// @brief Arithmetic right-shift function. + APInt ashr(unsigned shiftAmt) const; + + /// Logical right-shift this APInt by shiftAmt. + /// @brief Logical right-shift function. + APInt lshr(unsigned shiftAmt) const; + + /// Left-shift this APInt by shiftAmt. + /// @brief Left-shift function. + APInt shl(unsigned shiftAmt) const { + assert(shiftAmt <= BitWidth && "Invalid shift amount"); + if (isSingleWord()) { + if (shiftAmt == BitWidth) + return APInt(BitWidth, 0); // avoid undefined shift results + return APInt(BitWidth, VAL << shiftAmt); + } + return shlSlowCase(shiftAmt); + } + + /// @brief Rotate left by rotateAmt. + APInt rotl(unsigned rotateAmt) const; + + /// @brief Rotate right by rotateAmt. + APInt rotr(unsigned rotateAmt) const; + + /// Arithmetic right-shift this APInt by shiftAmt. + /// @brief Arithmetic right-shift function. + APInt ashr(const APInt &shiftAmt) const; + + /// Logical right-shift this APInt by shiftAmt. + /// @brief Logical right-shift function. + APInt lshr(const APInt &shiftAmt) const; + + /// Left-shift this APInt by shiftAmt. + /// @brief Left-shift function. + APInt shl(const APInt &shiftAmt) const; + + /// @brief Rotate left by rotateAmt. + APInt rotl(const APInt &rotateAmt) const; + + /// @brief Rotate right by rotateAmt. + APInt rotr(const APInt &rotateAmt) const; + + /// Perform an unsigned divide operation on this APInt by RHS. Both this and + /// RHS are treated as unsigned quantities for purposes of this division. + /// @returns a new APInt value containing the division result + /// @brief Unsigned division operation. + APInt udiv(const APInt& RHS) const; + + /// Signed divide this APInt by APInt RHS. + /// @brief Signed division function for APInt. + APInt sdiv(const APInt& RHS) const { + if (isNegative()) + if (RHS.isNegative()) + return (-(*this)).udiv(-RHS); + else + return -((-(*this)).udiv(RHS)); + else if (RHS.isNegative()) + return -(this->udiv(-RHS)); + return this->udiv(RHS); + } + + /// Perform an unsigned remainder operation on this APInt with RHS being the + /// divisor. Both this and RHS are treated as unsigned quantities for purposes + /// of this operation. Note that this is a true remainder operation and not + /// a modulo operation because the sign follows the sign of the dividend + /// which is *this. + /// @returns a new APInt value containing the remainder result + /// @brief Unsigned remainder operation. + APInt urem(const APInt& RHS) const; + + /// Signed remainder operation on APInt. 
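
To contrast the shift variants and the signed/unsigned division entry points declared above, here is a small illustrative sketch on 8-bit values.

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void shiftAndDivide() {
  APInt V(8, 0xF0);
  APInt L = V.lshr(4);             // 0x0F: logical shift zero-fills
  APInt A = V.ashr(4);             // 0xFF: arithmetic shift copies the sign bit
  APInt S = V.shl(1);              // 0xE0: the high bit is shifted out

  APInt Num(8, 0xF8), Den(8, 3);   // 0xF8 is 248 unsigned, -8 signed
  APInt UQ = Num.udiv(Den);        // 82   (248 / 3)
  APInt SQ = Num.sdiv(Den);        // -2   (0xFE)
  APInt UR = Num.urem(Den);        // 2    (248 % 3)
  (void)L; (void)A; (void)S; (void)UQ; (void)SQ; (void)UR;
}
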
+ /// @brief Function for signed remainder operation. + APInt srem(const APInt& RHS) const { + if (isNegative()) + if (RHS.isNegative()) + return -((-(*this)).urem(-RHS)); + else + return -((-(*this)).urem(RHS)); + else if (RHS.isNegative()) + return this->urem(-RHS); + return this->urem(RHS); + } + + /// Sometimes it is convenient to divide two APInt values and obtain both the + /// quotient and remainder. This function does both operations in the same + /// computation making it a little more efficient. The pair of input arguments + /// may overlap with the pair of output arguments. It is safe to call + /// udivrem(X, Y, X, Y), for example. + /// @brief Dual division/remainder interface. + static void udivrem(const APInt &LHS, const APInt &RHS, + APInt &Quotient, APInt &Remainder); + + static void sdivrem(const APInt &LHS, const APInt &RHS, + APInt &Quotient, APInt &Remainder) + { + if (LHS.isNegative()) { + if (RHS.isNegative()) + APInt::udivrem(-LHS, -RHS, Quotient, Remainder); + else + APInt::udivrem(-LHS, RHS, Quotient, Remainder); + Quotient = -Quotient; + Remainder = -Remainder; + } else if (RHS.isNegative()) { + APInt::udivrem(LHS, -RHS, Quotient, Remainder); + Quotient = -Quotient; + } else { + APInt::udivrem(LHS, RHS, Quotient, Remainder); + } + } + + /// @returns the bit value at bitPosition + /// @brief Array-indexing support. + bool operator[](unsigned bitPosition) const; + + /// @} + /// @name Comparison Operators + /// @{ + /// Compares this APInt with RHS for the validity of the equality + /// relationship. + /// @brief Equality operator. + bool operator==(const APInt& RHS) const { + assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths"); + if (isSingleWord()) + return VAL == RHS.VAL; + return EqualSlowCase(RHS); + } + + /// Compares this APInt with a uint64_t for the validity of the equality + /// relationship. + /// @returns true if *this == Val + /// @brief Equality operator. + bool operator==(uint64_t Val) const { + if (isSingleWord()) + return VAL == Val; + return EqualSlowCase(Val); + } + + /// Compares this APInt with RHS for the validity of the equality + /// relationship. + /// @returns true if *this == Val + /// @brief Equality comparison. + bool eq(const APInt &RHS) const { + return (*this) == RHS; + } + + /// Compares this APInt with RHS for the validity of the inequality + /// relationship. + /// @returns true if *this != Val + /// @brief Inequality operator. + bool operator!=(const APInt& RHS) const { + return !((*this) == RHS); + } + + /// Compares this APInt with a uint64_t for the validity of the inequality + /// relationship. + /// @returns true if *this != Val + /// @brief Inequality operator. + bool operator!=(uint64_t Val) const { + return !((*this) == Val); + } + + /// Compares this APInt with RHS for the validity of the inequality + /// relationship. + /// @returns true if *this != Val + /// @brief Inequality comparison + bool ne(const APInt &RHS) const { + return !((*this) == RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// the validity of the less-than relationship. + /// @returns true if *this < RHS when both are considered unsigned. + /// @brief Unsigned less than comparison + bool ult(const APInt& RHS) const; + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the less-than relationship. + /// @returns true if *this < RHS when both are considered signed. 
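
The dual interface above is handy when both results are needed; as its comment notes, the outputs may alias the inputs. A minimal sketch:

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void divRem() {
  APInt X(32, 100), Y(32, 7), Q(32, 0), R(32, 0);
  APInt::udivrem(X, Y, Q, R);   // Q = 14, R = 2
  // Aliasing the outputs with the inputs is explicitly allowed:
  APInt::udivrem(X, Y, X, Y);   // X = 14, Y = 2
}
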
+ /// @brief Signed less than comparison + bool slt(const APInt& RHS) const; + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// validity of the less-or-equal relationship. + /// @returns true if *this <= RHS when both are considered unsigned. + /// @brief Unsigned less or equal comparison + bool ule(const APInt& RHS) const { + return ult(RHS) || eq(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the less-or-equal relationship. + /// @returns true if *this <= RHS when both are considered signed. + /// @brief Signed less or equal comparison + bool sle(const APInt& RHS) const { + return slt(RHS) || eq(RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// the validity of the greater-than relationship. + /// @returns true if *this > RHS when both are considered unsigned. + /// @brief Unsigned greather than comparison + bool ugt(const APInt& RHS) const { + return !ult(RHS) && !eq(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// the validity of the greater-than relationship. + /// @returns true if *this > RHS when both are considered signed. + /// @brief Signed greather than comparison + bool sgt(const APInt& RHS) const { + return !slt(RHS) && !eq(RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// validity of the greater-or-equal relationship. + /// @returns true if *this >= RHS when both are considered unsigned. + /// @brief Unsigned greater or equal comparison + bool uge(const APInt& RHS) const { + return !ult(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the greater-or-equal relationship. + /// @returns true if *this >= RHS when both are considered signed. + /// @brief Signed greather or equal comparison + bool sge(const APInt& RHS) const { + return !slt(RHS); + } + + /// This operation tests if there are any pairs of corresponding bits + /// between this APInt and RHS that are both set. + bool intersects(const APInt &RHS) const { + return (*this & RHS) != 0; + } + + /// @} + /// @name Resizing Operators + /// @{ + /// Truncate the APInt to a specified width. It is an error to specify a width + /// that is greater than or equal to the current width. + /// @brief Truncate to new width. + APInt &trunc(unsigned width); + + /// This operation sign extends the APInt to a new width. If the high order + /// bit is set, the fill on the left will be done with 1 bits, otherwise zero. + /// It is an error to specify a width that is less than or equal to the + /// current width. + /// @brief Sign extend to a new width. + APInt &sext(unsigned width); + + /// This operation zero extends the APInt to a new width. The high order bits + /// are filled with 0 bits. It is an error to specify a width that is less + /// than or equal to the current width. + /// @brief Zero extend to a new width. + APInt &zext(unsigned width); + + /// Make this APInt have the bit width given by \p width. The value is sign + /// extended, truncated, or left alone to make it that width. + /// @brief Sign extend or truncate to width + APInt &sextOrTrunc(unsigned width); + + /// Make this APInt have the bit width given by \p width. The value is zero + /// extended, truncated, or left alone to make it that width. 
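
A short illustrative sketch of the resizing operations: they modify the value in place and return a reference to it.

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void resize() {
  APInt V(8, 0x80);        // 1000'0000
  V.zext(16);              // 0x0080: zero fill
  V.trunc(8);              // back to 0x80 (trunc requires a strictly smaller width)
  V.sext(16);              // 0xFF80: high bit replicated
}
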
+ /// @brief Zero extend or truncate to width + APInt &zextOrTrunc(unsigned width); + + /// @} + /// @name Bit Manipulation Operators + /// @{ + /// @brief Set every bit to 1. + APInt& set() { + if (isSingleWord()) { + VAL = -1ULL; + return clearUnusedBits(); + } + + // Set all the bits in all the words. + for (unsigned i = 0; i < getNumWords(); ++i) + pVal[i] = -1ULL; + // Clear the unused ones + return clearUnusedBits(); + } + + /// Set the given bit to 1 whose position is given as "bitPosition". + /// @brief Set a given bit to 1. + APInt& set(unsigned bitPosition); + + /// @brief Set every bit to 0. + APInt& clear() { + if (isSingleWord()) + VAL = 0; + else + memset(pVal, 0, getNumWords() * APINT_WORD_SIZE); + return *this; + } + + /// Set the given bit to 0 whose position is given as "bitPosition". + /// @brief Set a given bit to 0. + APInt& clear(unsigned bitPosition); + + /// @brief Toggle every bit to its opposite value. + APInt& flip() { + if (isSingleWord()) { + VAL ^= -1ULL; + return clearUnusedBits(); + } + for (unsigned i = 0; i < getNumWords(); ++i) + pVal[i] ^= -1ULL; + return clearUnusedBits(); + } + + /// Toggle a given bit to its opposite value whose position is given + /// as "bitPosition". + /// @brief Toggles a given bit to its opposite value. + APInt& flip(unsigned bitPosition); + + /// @} + /// @name Value Characterization Functions + /// @{ + + /// @returns the total number of bits. + unsigned getBitWidth() const { + return BitWidth; + } + + /// Here one word's bitwidth equals to that of uint64_t. + /// @returns the number of words to hold the integer value of this APInt. + /// @brief Get the number of words. + unsigned getNumWords() const { + return getNumWords(BitWidth); + } + + /// Here one word's bitwidth equals to that of uint64_t. + /// @returns the number of words to hold the integer value with a + /// given bit width. + /// @brief Get the number of words. + static unsigned getNumWords(unsigned BitWidth) { + return (BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD; + } + + /// This function returns the number of active bits which is defined as the + /// bit width minus the number of leading zeros. This is used in several + /// computations to see how "wide" the value is. + /// @brief Compute the number of active bits in the value + unsigned getActiveBits() const { + return BitWidth - countLeadingZeros(); + } + + /// This function returns the number of active words in the value of this + /// APInt. This is used in conjunction with getActiveData to extract the raw + /// value of the APInt. + unsigned getActiveWords() const { + return whichWord(getActiveBits()-1) + 1; + } + + /// Computes the minimum bit width for this APInt while considering it to be + /// a signed (and probably negative) value. If the value is not negative, + /// this function returns the same value as getActiveBits()+1. Otherwise, it + /// returns the smallest bit width that will retain the negative value. For + /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so + /// for -1, this function will always return 1. + /// @brief Get the minimum bit size for this signed APInt + unsigned getMinSignedBits() const { + if (isNegative()) + return BitWidth - countLeadingOnes() + 1; + return getActiveBits()+1; + } + + /// This method attempts to return the value of this APInt as a zero extended + /// uint64_t. The bitwidth must be <= 64 or the value must fit within a + /// uint64_t. Otherwise an assertion will result. 
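
A small sketch of the in-place bit manipulation methods declared above; each returns *this, and the intermediate values are noted in the comments.

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void bitTwiddle() {
  APInt V(16, 0);
  V.set(3);                              // 0x0008
  V.set(15);                             // 0x8008
  V.flip();                              // 0x7FF7
  V.clear(0);                            // 0x7FF6
  unsigned Active = V.getActiveBits();   // 15: width minus leading zeros
  (void)Active;
}
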
+ /// @brief Get zero extended value + uint64_t getZExtValue() const { + if (isSingleWord()) + return VAL; + assert(getActiveBits() <= 64 && "Too many bits for uint64_t"); + return pVal[0]; + } + + /// This method attempts to return the value of this APInt as a sign extended + /// int64_t. The bit width must be <= 64 or the value must fit within an + /// int64_t. Otherwise an assertion will result. + /// @brief Get sign extended value + int64_t getSExtValue() const { + if (isSingleWord()) + return int64_t(VAL << (APINT_BITS_PER_WORD - BitWidth)) >> + (APINT_BITS_PER_WORD - BitWidth); + assert(getMinSignedBits() <= 64 && "Too many bits for int64_t"); + return int64_t(pVal[0]); + } + + /// This method determines how many bits are required to hold the APInt + /// equivalent of the string given by \p str of length \p slen. + /// @brief Get bits required for string value. + static unsigned getBitsNeeded(const char* str, unsigned slen, uint8_t radix); + + /// countLeadingZeros - This function is an APInt version of the + /// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number + /// of zeros from the most significant bit to the first one bit. + /// @returns BitWidth if the value is zero. + /// @returns the number of zeros from the most significant bit to the first + /// one bits. + unsigned countLeadingZeros() const { + if (isSingleWord()) { + unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth; + return CountLeadingZeros_64(VAL) - unusedBits; + } + return countLeadingZerosSlowCase(); + } + + /// countLeadingOnes - This function is an APInt version of the + /// countLeadingOnes_{32,64} functions in MathExtras.h. It counts the number + /// of ones from the most significant bit to the first zero bit. + /// @returns 0 if the high order bit is not set + /// @returns the number of 1 bits from the most significant to the least + /// @brief Count the number of leading one bits. + unsigned countLeadingOnes() const; + + /// countTrailingZeros - This function is an APInt version of the + /// countTrailingZeros_{32,64} functions in MathExtras.h. It counts + /// the number of zeros from the least significant bit to the first set bit. + /// @returns BitWidth if the value is zero. + /// @returns the number of zeros from the least significant bit to the first + /// one bit. + /// @brief Count the number of trailing zero bits. + unsigned countTrailingZeros() const; + + /// countTrailingOnes - This function is an APInt version of the + /// countTrailingOnes_{32,64} functions in MathExtras.h. It counts + /// the number of ones from the least significant bit to the first zero bit. + /// @returns BitWidth if the value is all ones. + /// @returns the number of ones from the least significant bit to the first + /// zero bit. + /// @brief Count the number of trailing one bits. + unsigned countTrailingOnes() const { + if (isSingleWord()) + return CountTrailingOnes_64(VAL); + return countTrailingOnesSlowCase(); + } + + /// countPopulation - This function is an APInt version of the + /// countPopulation_{32,64} functions in MathExtras.h. It counts the number + /// of 1 bits in the APInt value. + /// @returns 0 if the value is zero. + /// @returns the number of set bits. + /// @brief Count the number of bits set. 
+ unsigned countPopulation() const { + if (isSingleWord()) + return CountPopulation_64(VAL); + return countPopulationSlowCase(); + } + + /// @} + /// @name Conversion Functions + /// @{ + void print(raw_ostream &OS, bool isSigned) const; + + /// toString - Converts an APInt to a string and append it to Str. Str is + /// commonly a SmallString. + void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed) const; + + /// Considers the APInt to be unsigned and converts it into a string in the + /// radix given. The radix can be 2, 8, 10 or 16. + void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { + toString(Str, Radix, false); + } + + /// Considers the APInt to be signed and converts it into a string in the + /// radix given. The radix can be 2, 8, 10 or 16. + void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { + toString(Str, Radix, true); + } + + /// toString - This returns the APInt as a std::string. Note that this is an + /// inefficient method. It is better to pass in a SmallVector/SmallString + /// to the methods above to avoid thrashing the heap for the string. + std::string toString(unsigned Radix, bool Signed) const; + + + /// @returns a byte-swapped representation of this APInt Value. + APInt byteSwap() const; + + /// @brief Converts this APInt to a double value. + double roundToDouble(bool isSigned) const; + + /// @brief Converts this unsigned APInt to a double value. + double roundToDouble() const { + return roundToDouble(false); + } + + /// @brief Converts this signed APInt to a double value. + double signedRoundToDouble() const { + return roundToDouble(true); + } + + /// The conversion does not do a translation from integer to double, it just + /// re-interprets the bits as a double. Note that it is valid to do this on + /// any bit width. Exactly 64 bits will be translated. + /// @brief Converts APInt bits to a double + double bitsToDouble() const { + union { + uint64_t I; + double D; + } T; + T.I = (isSingleWord() ? VAL : pVal[0]); + return T.D; + } + + /// The conversion does not do a translation from integer to float, it just + /// re-interprets the bits as a float. Note that it is valid to do this on + /// any bit width. Exactly 32 bits will be translated. + /// @brief Converts APInt bits to a double + float bitsToFloat() const { + union { + unsigned I; + float F; + } T; + T.I = unsigned((isSingleWord() ? VAL : pVal[0])); + return T.F; + } + + /// The conversion does not do a translation from double to integer, it just + /// re-interprets the bits of the double. Note that it is valid to do this on + /// any bit width but bits from V may get truncated. + /// @brief Converts a double to APInt bits. + APInt& doubleToBits(double V) { + union { + uint64_t I; + double D; + } T; + T.D = V; + if (isSingleWord()) + VAL = T.I; + else + pVal[0] = T.I; + return clearUnusedBits(); + } + + /// The conversion does not do a translation from float to integer, it just + /// re-interprets the bits of the float. Note that it is valid to do this on + /// any bit width but bits from V may get truncated. + /// @brief Converts a float to APInt bits. + APInt& floatToBits(float V) { + union { + unsigned I; + float F; + } T; + T.F = V; + if (isSingleWord()) + VAL = T.I; + else + pVal[0] = T.I; + return clearUnusedBits(); + } + + /// @} + /// @name Mathematics Operations + /// @{ + + /// @returns the floor log base 2 of this APInt. 
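+  /// For example, the floor log base 2 of the value 20 (0b10100) is 4.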
+ unsigned logBase2() const { + return BitWidth - 1 - countLeadingZeros(); + } + + /// @returns the log base 2 of this APInt if its an exact power of two, -1 + /// otherwise + int32_t exactLogBase2() const { + if (!isPowerOf2()) + return -1; + return logBase2(); + } + + /// @brief Compute the square root + APInt sqrt() const; + + /// If *this is < 0 then return -(*this), otherwise *this; + /// @brief Get the absolute value; + APInt abs() const { + if (isNegative()) + return -(*this); + return *this; + } + + /// @returns the multiplicative inverse for a given modulo. + APInt multiplicativeInverse(const APInt& modulo) const; + + /// @} + /// @name Support for division by constant + /// @{ + + /// Calculate the magic number for signed division by a constant. + struct ms; + ms magic() const; + + /// Calculate the magic number for unsigned division by a constant. + struct mu; + mu magicu() const; + + /// @} + /// @name Building-block Operations for APInt and APFloat + /// @{ + + // These building block operations operate on a representation of + // arbitrary precision, two's-complement, bignum integer values. + // They should be sufficient to implement APInt and APFloat bignum + // requirements. Inputs are generally a pointer to the base of an + // array of integer parts, representing an unsigned bignum, and a + // count of how many parts there are. + + /// Sets the least significant part of a bignum to the input value, + /// and zeroes out higher parts. */ + static void tcSet(integerPart *, integerPart, unsigned int); + + /// Assign one bignum to another. + static void tcAssign(integerPart *, const integerPart *, unsigned int); + + /// Returns true if a bignum is zero, false otherwise. + static bool tcIsZero(const integerPart *, unsigned int); + + /// Extract the given bit of a bignum; returns 0 or 1. Zero-based. + static int tcExtractBit(const integerPart *, unsigned int bit); + + /// Copy the bit vector of width srcBITS from SRC, starting at bit + /// srcLSB, to DST, of dstCOUNT parts, such that the bit srcLSB + /// becomes the least significant bit of DST. All high bits above + /// srcBITS in DST are zero-filled. + static void tcExtract(integerPart *, unsigned int dstCount, + const integerPart *, + unsigned int srcBits, unsigned int srcLSB); + + /// Set the given bit of a bignum. Zero-based. + static void tcSetBit(integerPart *, unsigned int bit); + + /// Returns the bit number of the least or most significant set bit + /// of a number. If the input number has no bits set -1U is + /// returned. + static unsigned int tcLSB(const integerPart *, unsigned int); + static unsigned int tcMSB(const integerPart *parts, unsigned int n); + + /// Negate a bignum in-place. + static void tcNegate(integerPart *, unsigned int); + + /// DST += RHS + CARRY where CARRY is zero or one. Returns the + /// carry flag. + static integerPart tcAdd(integerPart *, const integerPart *, + integerPart carry, unsigned); + + /// DST -= RHS + CARRY where CARRY is zero or one. Returns the + /// carry flag. + static integerPart tcSubtract(integerPart *, const integerPart *, + integerPart carry, unsigned); + + /// DST += SRC * MULTIPLIER + PART if add is true + /// DST = SRC * MULTIPLIER + PART if add is false + /// + /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC + /// they must start at the same point, i.e. DST == SRC. + /// + /// If DSTPARTS == SRC_PARTS + 1 no overflow occurs and zero is + /// returned. 
Otherwise DST is filled with the least significant + /// DSTPARTS parts of the result, and if all of the omitted higher + /// parts were zero return zero, otherwise overflow occurred and + /// return one. + static int tcMultiplyPart(integerPart *dst, const integerPart *src, + integerPart multiplier, integerPart carry, + unsigned int srcParts, unsigned int dstParts, + bool add); + + /// DST = LHS * RHS, where DST has the same width as the operands + /// and is filled with the least significant parts of the result. + /// Returns one if overflow occurred, otherwise zero. DST must be + /// disjoint from both operands. + static int tcMultiply(integerPart *, const integerPart *, + const integerPart *, unsigned); + + /// DST = LHS * RHS, where DST has width the sum of the widths of + /// the operands. No overflow occurs. DST must be disjoint from + /// both operands. Returns the number of parts required to hold the + /// result. + static unsigned int tcFullMultiply(integerPart *, const integerPart *, + const integerPart *, unsigned, unsigned); + + /// If RHS is zero LHS and REMAINDER are left unchanged, return one. + /// Otherwise set LHS to LHS / RHS with the fractional part + /// discarded, set REMAINDER to the remainder, return zero. i.e. + /// + /// OLD_LHS = RHS * LHS + REMAINDER + /// + /// SCRATCH is a bignum of the same size as the operands and result + /// for use by the routine; its contents need not be initialized + /// and are destroyed. LHS, REMAINDER and SCRATCH must be + /// distinct. + static int tcDivide(integerPart *lhs, const integerPart *rhs, + integerPart *remainder, integerPart *scratch, + unsigned int parts); + + /// Shift a bignum left COUNT bits. Shifted in bits are zero. + /// There are no restrictions on COUNT. + static void tcShiftLeft(integerPart *, unsigned int parts, + unsigned int count); + + /// Shift a bignum right COUNT bits. Shifted in bits are zero. + /// There are no restrictions on COUNT. + static void tcShiftRight(integerPart *, unsigned int parts, + unsigned int count); + + /// The obvious AND, OR and XOR and complement operations. + static void tcAnd(integerPart *, const integerPart *, unsigned int); + static void tcOr(integerPart *, const integerPart *, unsigned int); + static void tcXor(integerPart *, const integerPart *, unsigned int); + static void tcComplement(integerPart *, unsigned int); + + /// Comparison (unsigned) of two bignums. + static int tcCompare(const integerPart *, const integerPart *, + unsigned int); + + /// Increment a bignum in-place. Return the carry flag. + static integerPart tcIncrement(integerPart *, unsigned int); + + /// Set the least significant BITS and clear the rest. + static void tcSetLeastSignificantBits(integerPart *, unsigned int, + unsigned int bits); + + /// @brief debug method + void dump() const; + + /// @} +}; + +/// Magic data for optimising signed division by a constant. +struct APInt::ms { + APInt m; ///< magic number + unsigned s; ///< shift amount +}; + +/// Magic data for optimising unsigned division by a constant. +struct APInt::mu { + APInt m; ///< magic number + bool a; ///< add indicator + unsigned s; ///< shift amount +}; + +inline bool operator==(uint64_t V1, const APInt& V2) { + return V2 == V1; +} + +inline bool operator!=(uint64_t V1, const APInt& V2) { + return V2 != V1; +} + +inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) { + I.print(OS, true); + return OS; +} + +namespace APIntOps { + +/// @brief Determine the smaller of two APInts considered to be signed. 
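+/// For example, with 8-bit operands, smin of 250 and 3 is 250, because 250
+/// is -6 when interpreted as a signed value.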
+inline APInt smin(const APInt &A, const APInt &B) {
+  return A.slt(B) ? A : B;
+}
+
+/// @brief Determine the larger of two APInts considered to be signed.
+inline APInt smax(const APInt &A, const APInt &B) {
+  return A.sgt(B) ? A : B;
+}
+
+/// @brief Determine the smaller of two APInts considered to be unsigned.
+inline APInt umin(const APInt &A, const APInt &B) {
+  return A.ult(B) ? A : B;
+}
+
+/// @brief Determine the larger of two APInts considered to be unsigned.
+inline APInt umax(const APInt &A, const APInt &B) {
+  return A.ugt(B) ? A : B;
+}
+
+/// @brief Check if the specified APInt has an N-bit unsigned integer value.
+inline bool isIntN(unsigned N, const APInt& APIVal) {
+  return APIVal.isIntN(N);
+}
+
+/// @brief Check if the specified APInt has an N-bit signed integer value.
+inline bool isSignedIntN(unsigned N, const APInt& APIVal) {
+  return APIVal.isSignedIntN(N);
+}
+
+/// @returns true if the argument APInt value is a sequence of ones
+/// starting at the least significant bit with the remainder zero.
+inline bool isMask(unsigned numBits, const APInt& APIVal) {
+  return numBits <= APIVal.getBitWidth() &&
+    APIVal == APInt::getLowBitsSet(APIVal.getBitWidth(), numBits);
+}
+
+/// @returns true if the argument APInt value contains a sequence of ones
+/// with the remainder zero.
+inline bool isShiftedMask(unsigned numBits, const APInt& APIVal) {
+  return isMask(numBits, (APIVal - APInt(numBits,1)) | APIVal);
+}
+
+/// @returns a byte-swapped representation of the specified APInt Value.
+inline APInt byteSwap(const APInt& APIVal) {
+  return APIVal.byteSwap();
+}
+
+/// @returns the floor log base 2 of the specified APInt value.
+inline unsigned logBase2(const APInt& APIVal) {
+  return APIVal.logBase2();
+}
+
+/// GreatestCommonDivisor - This function returns the greatest common
+/// divisor of the two APInt values using Euclid's algorithm.
+/// @returns the greatest common divisor of Val1 and Val2
+/// @brief Compute GCD of two APInt values.
+APInt GreatestCommonDivisor(const APInt& Val1, const APInt& Val2);
+
+/// Treats the APInt as an unsigned value for conversion purposes.
+/// @brief Converts the given APInt to a double value.
+inline double RoundAPIntToDouble(const APInt& APIVal) {
+  return APIVal.roundToDouble();
+}
+
+/// Treats the APInt as a signed value for conversion purposes.
+/// @brief Converts the given APInt to a double value.
+inline double RoundSignedAPIntToDouble(const APInt& APIVal) {
+  return APIVal.signedRoundToDouble();
+}
+
+/// @brief Converts the given APInt to a float value.
+inline float RoundAPIntToFloat(const APInt& APIVal) {
+  return float(RoundAPIntToDouble(APIVal));
+}
+
+/// Treats the APInt as a signed value for conversion purposes.
+/// @brief Converts the given APInt to a float value.
+inline float RoundSignedAPIntToFloat(const APInt& APIVal) {
+  return float(APIVal.signedRoundToDouble());
+}
+
+/// RoundDoubleToAPInt - This function converts a double value to an APInt value.
+/// @brief Converts the given double value into an APInt.
+APInt RoundDoubleToAPInt(double Double, unsigned width);
+
+/// RoundFloatToAPInt - Converts a float value into an APInt value.
+/// @brief Converts a float value into an APInt.
+inline APInt RoundFloatToAPInt(float Float, unsigned width) {
+  return RoundDoubleToAPInt(double(Float), width);
+}
+
+/// Arithmetic right-shift the APInt by shiftAmt.
+/// @brief Arithmetic right-shift function.
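+/// For example, arithmetic right-shifting the 8-bit value 0x80 by one bit
+/// yields 0xC0, because the sign bit is replicated into the vacated position.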
+inline APInt ashr(const APInt& LHS, unsigned shiftAmt) { + return LHS.ashr(shiftAmt); +} + +/// Logical right-shift the APInt by shiftAmt. +/// @brief Logical right-shift function. +inline APInt lshr(const APInt& LHS, unsigned shiftAmt) { + return LHS.lshr(shiftAmt); +} + +/// Left-shift the APInt by shiftAmt. +/// @brief Left-shift function. +inline APInt shl(const APInt& LHS, unsigned shiftAmt) { + return LHS.shl(shiftAmt); +} + +/// Signed divide APInt LHS by APInt RHS. +/// @brief Signed division function for APInt. +inline APInt sdiv(const APInt& LHS, const APInt& RHS) { + return LHS.sdiv(RHS); +} + +/// Unsigned divide APInt LHS by APInt RHS. +/// @brief Unsigned division function for APInt. +inline APInt udiv(const APInt& LHS, const APInt& RHS) { + return LHS.udiv(RHS); +} + +/// Signed remainder operation on APInt. +/// @brief Function for signed remainder operation. +inline APInt srem(const APInt& LHS, const APInt& RHS) { + return LHS.srem(RHS); +} + +/// Unsigned remainder operation on APInt. +/// @brief Function for unsigned remainder operation. +inline APInt urem(const APInt& LHS, const APInt& RHS) { + return LHS.urem(RHS); +} + +/// Performs multiplication on APInt values. +/// @brief Function for multiplication operation. +inline APInt mul(const APInt& LHS, const APInt& RHS) { + return LHS * RHS; +} + +/// Performs addition on APInt values. +/// @brief Function for addition operation. +inline APInt add(const APInt& LHS, const APInt& RHS) { + return LHS + RHS; +} + +/// Performs subtraction on APInt values. +/// @brief Function for subtraction operation. +inline APInt sub(const APInt& LHS, const APInt& RHS) { + return LHS - RHS; +} + +/// Performs bitwise AND operation on APInt LHS and +/// APInt RHS. +/// @brief Bitwise AND function for APInt. +inline APInt And(const APInt& LHS, const APInt& RHS) { + return LHS & RHS; +} + +/// Performs bitwise OR operation on APInt LHS and APInt RHS. +/// @brief Bitwise OR function for APInt. +inline APInt Or(const APInt& LHS, const APInt& RHS) { + return LHS | RHS; +} + +/// Performs bitwise XOR operation on APInt. +/// @brief Bitwise XOR function for APInt. +inline APInt Xor(const APInt& LHS, const APInt& RHS) { + return LHS ^ RHS; +} + +/// Performs a bitwise complement operation on APInt. +/// @brief Bitwise complement function. +inline APInt Not(const APInt& APIVal) { + return ~APIVal; +} + +} // End of APIntOps namespace + +} // End of llvm namespace + +#endif diff --git a/include/llvm/ADT/APSInt.h b/include/llvm/ADT/APSInt.h new file mode 100644 index 0000000..1c9931c --- /dev/null +++ b/include/llvm/ADT/APSInt.h @@ -0,0 +1,264 @@ +//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the APSInt class, which is a simple class that +// represents an arbitrary sized integer that knows its signedness. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_APSINT_H +#define LLVM_APSINT_H + +#include "llvm/ADT/APInt.h" + +namespace llvm { + +class APSInt : public APInt { + bool IsUnsigned; +public: + /// Default constructor that creates an uninitialized APInt. + explicit APSInt() {} + + /// APSInt ctor - Create an APSInt with the specified width, default to + /// unsigned. 
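+  /// For example, APSInt(32) is an unsigned 32-bit zero, while
+  /// APSInt(32, false) is a signed 32-bit zero.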
+  explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
+   : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
+
+  explicit APSInt(const APInt &I, bool isUnsigned = true)
+   : APInt(I), IsUnsigned(isUnsigned) {}
+
+  APSInt &operator=(const APSInt &RHS) {
+    APInt::operator=(RHS);
+    IsUnsigned = RHS.IsUnsigned;
+    return *this;
+  }
+
+  APSInt &operator=(const APInt &RHS) {
+    // Retain our current sign.
+    APInt::operator=(RHS);
+    return *this;
+  }
+
+  APSInt &operator=(uint64_t RHS) {
+    // Retain our current sign.
+    APInt::operator=(RHS);
+    return *this;
+  }
+
+  // Query sign information.
+  bool isSigned() const { return !IsUnsigned; }
+  bool isUnsigned() const { return IsUnsigned; }
+  void setIsUnsigned(bool Val) { IsUnsigned = Val; }
+  void setIsSigned(bool Val) { IsUnsigned = !Val; }
+
+  /// toString - Append this APSInt to the specified SmallString.
+  void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+    APInt::toString(Str, Radix, isSigned());
+  }
+  /// toString - Converts an APInt to a std::string. This is an inefficient
+  /// method; you should prefer passing in a SmallString instead.
+  std::string toString(unsigned Radix) const {
+    return APInt::toString(Radix, isSigned());
+  }
+  using APInt::toString;
+
+  APSInt& extend(uint32_t width) {
+    if (IsUnsigned)
+      zext(width);
+    else
+      sext(width);
+    return *this;
+  }
+
+  APSInt& extOrTrunc(uint32_t width) {
+      if (IsUnsigned)
+        zextOrTrunc(width);
+      else
+        sextOrTrunc(width);
+      return *this;
+  }
+
+  const APSInt &operator%=(const APSInt &RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    if (IsUnsigned)
+      *this = urem(RHS);
+    else
+      *this = srem(RHS);
+    return *this;
+  }
+  const APSInt &operator/=(const APSInt &RHS) {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    if (IsUnsigned)
+      *this = udiv(RHS);
+    else
+      *this = sdiv(RHS);
+    return *this;
+  }
+  APSInt operator%(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
+  }
+  APSInt operator/(const APSInt &RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
+  }
+
+  APSInt operator>>(unsigned Amt) const {
+    return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
+  }
+  APSInt& operator>>=(unsigned Amt) {
+    *this = *this >> Amt;
+    return *this;
+  }
+
+  inline bool operator<(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ult(RHS) : slt(RHS);
+  }
+  inline bool operator>(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ugt(RHS) : sgt(RHS);
+  }
+  inline bool operator<=(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? ule(RHS) : sle(RHS);
+  }
+  inline bool operator>=(const APSInt& RHS) const {
+    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+    return IsUnsigned ? uge(RHS) : sge(RHS);
+  }
+
+  // The remaining operators just wrap the logic of APInt, but retain the
+  // signedness information.
+ + APSInt operator<<(unsigned Bits) const { + return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned); + } + APSInt& operator<<=(unsigned Amt) { + *this = *this << Amt; + return *this; + } + + APSInt& operator++() { + static_cast<APInt&>(*this)++; + return *this; + } + APSInt& operator--() { + static_cast<APInt&>(*this)--; + return *this; + } + APSInt operator++(int) { + return APSInt(++static_cast<APInt&>(*this), IsUnsigned); + } + APSInt operator--(int) { + return APSInt(--static_cast<APInt&>(*this), IsUnsigned); + } + APSInt operator-() const { + return APSInt(-static_cast<const APInt&>(*this), IsUnsigned); + } + APSInt& operator+=(const APSInt& RHS) { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + static_cast<APInt&>(*this) += RHS; + return *this; + } + APSInt& operator-=(const APSInt& RHS) { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + static_cast<APInt&>(*this) -= RHS; + return *this; + } + APSInt& operator*=(const APSInt& RHS) { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + static_cast<APInt&>(*this) *= RHS; + return *this; + } + APSInt& operator&=(const APSInt& RHS) { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + static_cast<APInt&>(*this) &= RHS; + return *this; + } + APSInt& operator|=(const APSInt& RHS) { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + static_cast<APInt&>(*this) |= RHS; + return *this; + } + APSInt& operator^=(const APSInt& RHS) { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + static_cast<APInt&>(*this) ^= RHS; + return *this; + } + + APSInt operator&(const APSInt& RHS) const { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned); + } + APSInt And(const APSInt& RHS) const { + return this->operator&(RHS); + } + + APSInt operator|(const APSInt& RHS) const { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned); + } + APSInt Or(const APSInt& RHS) const { + return this->operator|(RHS); + } + + + APSInt operator^(const APSInt& RHS) const { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned); + } + APSInt Xor(const APSInt& RHS) const { + return this->operator^(RHS); + } + + APSInt operator*(const APSInt& RHS) const { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned); + } + APSInt operator+(const APSInt& RHS) const { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned); + } + APSInt operator-(const APSInt& RHS) const { + assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!"); + return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned); + } + APSInt operator~() const { + return APSInt(~static_cast<const APInt&>(*this), IsUnsigned); + } + + /// getMaxValue - Return the APSInt representing the maximum integer value + /// with the given bit width and signedness. + static APSInt getMaxValue(uint32_t numBits, bool Unsigned) { + return APSInt(Unsigned ? APInt::getMaxValue(numBits) + : APInt::getSignedMaxValue(numBits), Unsigned); + } + + /// getMinValue - Return the APSInt representing the minimum integer value + /// with the given bit width and signedness. 
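+  /// For example, getMinValue(8, true) is 0, while getMinValue(8, false) is
+  /// the signed minimum value -128.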
+ static APSInt getMinValue(uint32_t numBits, bool Unsigned) { + return APSInt(Unsigned ? APInt::getMinValue(numBits) + : APInt::getSignedMinValue(numBits), Unsigned); + } + + /// Profile - Used to insert APSInt objects, or objects that contain APSInt + /// objects, into FoldingSets. + void Profile(FoldingSetNodeID& ID) const; +}; + +inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) { + I.print(OS, I.isSigned()); + return OS; +} + + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h new file mode 100644 index 0000000..9c046ef --- /dev/null +++ b/include/llvm/ADT/BitVector.h @@ -0,0 +1,409 @@ +//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the BitVector class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_BITVECTOR_H +#define LLVM_ADT_BITVECTOR_H + +#include "llvm/Support/MathExtras.h" +#include <algorithm> +#include <cassert> +#include <climits> +#include <cstring> + +namespace llvm { + +class BitVector { + typedef unsigned long BitWord; + + enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT }; + + BitWord *Bits; // Actual bits. + unsigned Size; // Size of bitvector in bits. + unsigned Capacity; // Size of allocated memory in BitWord. + +public: + // Encapsulation of a single bit. + class reference { + friend class BitVector; + + BitWord *WordRef; + unsigned BitPos; + + reference(); // Undefined + + public: + reference(BitVector &b, unsigned Idx) { + WordRef = &b.Bits[Idx / BITWORD_SIZE]; + BitPos = Idx % BITWORD_SIZE; + } + + ~reference() {} + + reference& operator=(bool t) { + if (t) + *WordRef |= 1L << BitPos; + else + *WordRef &= ~(1L << BitPos); + return *this; + } + + operator bool() const { + return ((*WordRef) & (1L << BitPos)) ? true : false; + } + }; + + + /// BitVector default ctor - Creates an empty bitvector. + BitVector() : Size(0), Capacity(0) { + Bits = 0; + } + + /// BitVector ctor - Creates a bitvector of specified number of bits. All + /// bits are initialized to the specified value. + explicit BitVector(unsigned s, bool t = false) : Size(s) { + Capacity = NumBitWords(s); + Bits = new BitWord[Capacity]; + init_words(Bits, Capacity, t); + if (t) + clear_unused_bits(); + } + + /// BitVector copy ctor. + BitVector(const BitVector &RHS) : Size(RHS.size()) { + if (Size == 0) { + Bits = 0; + Capacity = 0; + return; + } + + Capacity = NumBitWords(RHS.size()); + Bits = new BitWord[Capacity]; + std::copy(RHS.Bits, &RHS.Bits[Capacity], Bits); + } + + ~BitVector() { + delete[] Bits; + } + + /// size - Returns the number of bits in this bitvector. + unsigned size() const { return Size; } + + /// count - Returns the number of bits which are set. + unsigned count() const { + unsigned NumBits = 0; + for (unsigned i = 0; i < NumBitWords(size()); ++i) + if (sizeof(BitWord) == 4) + NumBits += CountPopulation_32((uint32_t)Bits[i]); + else if (sizeof(BitWord) == 8) + NumBits += CountPopulation_64(Bits[i]); + else + assert(0 && "Unsupported!"); + return NumBits; + } + + /// any - Returns true if any bit is set. 
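+  /// Equivalent to count() != 0, but returns as soon as a non-zero word is
+  /// found.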
+ bool any() const { + for (unsigned i = 0; i < NumBitWords(size()); ++i) + if (Bits[i] != 0) + return true; + return false; + } + + /// none - Returns true if none of the bits are set. + bool none() const { + return !any(); + } + + /// find_first - Returns the index of the first set bit, -1 if none + /// of the bits are set. + int find_first() const { + for (unsigned i = 0; i < NumBitWords(size()); ++i) + if (Bits[i] != 0) { + if (sizeof(BitWord) == 4) + return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]); + else if (sizeof(BitWord) == 8) + return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); + else + assert(0 && "Unsupported!"); + } + return -1; + } + + /// find_next - Returns the index of the next set bit following the + /// "Prev" bit. Returns -1 if the next set bit is not found. + int find_next(unsigned Prev) const { + ++Prev; + if (Prev >= Size) + return -1; + + unsigned WordPos = Prev / BITWORD_SIZE; + unsigned BitPos = Prev % BITWORD_SIZE; + BitWord Copy = Bits[WordPos]; + // Mask off previous bits. + Copy &= ~0L << BitPos; + + if (Copy != 0) { + if (sizeof(BitWord) == 4) + return WordPos * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Copy); + else if (sizeof(BitWord) == 8) + return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy); + else + assert(0 && "Unsupported!"); + } + + // Check subsequent words. + for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i) + if (Bits[i] != 0) { + if (sizeof(BitWord) == 4) + return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]); + else if (sizeof(BitWord) == 8) + return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); + else + assert(0 && "Unsupported!"); + } + return -1; + } + + /// clear - Clear all bits. + void clear() { + Size = 0; + } + + /// resize - Grow or shrink the bitvector. + void resize(unsigned N, bool t = false) { + if (N > Capacity * BITWORD_SIZE) { + unsigned OldCapacity = Capacity; + grow(N); + init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t); + } + + // Set any old unused bits that are now included in the BitVector. This + // may set bits that are not included in the new vector, but we will clear + // them back out below. + if (N > Size) + set_unused_bits(t); + + // Update the size, and clear out any bits that are now unused + unsigned OldSize = Size; + Size = N; + if (t || N < OldSize) + clear_unused_bits(); + } + + void reserve(unsigned N) { + if (N > Capacity * BITWORD_SIZE) + grow(N); + } + + // Set, reset, flip + BitVector &set() { + init_words(Bits, Capacity, true); + clear_unused_bits(); + return *this; + } + + BitVector &set(unsigned Idx) { + Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE); + return *this; + } + + BitVector &reset() { + init_words(Bits, Capacity, false); + return *this; + } + + BitVector &reset(unsigned Idx) { + Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE)); + return *this; + } + + BitVector &flip() { + for (unsigned i = 0; i < NumBitWords(size()); ++i) + Bits[i] = ~Bits[i]; + clear_unused_bits(); + return *this; + } + + BitVector &flip(unsigned Idx) { + Bits[Idx / BITWORD_SIZE] ^= 1L << (Idx % BITWORD_SIZE); + return *this; + } + + // No argument flip. + BitVector operator~() const { + return BitVector(*this).flip(); + } + + // Indexing. 
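+  // The non-const operator[] returns a proxy reference so that "BV[i] = true"
+  // works; the const overload returns a plain bool.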
+ reference operator[](unsigned Idx) { + assert (Idx < Size && "Out-of-bounds Bit access."); + return reference(*this, Idx); + } + + bool operator[](unsigned Idx) const { + assert (Idx < Size && "Out-of-bounds Bit access."); + BitWord Mask = 1L << (Idx % BITWORD_SIZE); + return (Bits[Idx / BITWORD_SIZE] & Mask) != 0; + } + + bool test(unsigned Idx) const { + return (*this)[Idx]; + } + + // Comparison operators. + bool operator==(const BitVector &RHS) const { + unsigned ThisWords = NumBitWords(size()); + unsigned RHSWords = NumBitWords(RHS.size()); + unsigned i; + for (i = 0; i != std::min(ThisWords, RHSWords); ++i) + if (Bits[i] != RHS.Bits[i]) + return false; + + // Verify that any extra words are all zeros. + if (i != ThisWords) { + for (; i != ThisWords; ++i) + if (Bits[i]) + return false; + } else if (i != RHSWords) { + for (; i != RHSWords; ++i) + if (RHS.Bits[i]) + return false; + } + return true; + } + + bool operator!=(const BitVector &RHS) const { + return !(*this == RHS); + } + + // Intersection, union, disjoint union. + BitVector &operator&=(const BitVector &RHS) { + unsigned ThisWords = NumBitWords(size()); + unsigned RHSWords = NumBitWords(RHS.size()); + unsigned i; + for (i = 0; i != std::min(ThisWords, RHSWords); ++i) + Bits[i] &= RHS.Bits[i]; + + // Any bits that are just in this bitvector become zero, because they aren't + // in the RHS bit vector. Any words only in RHS are ignored because they + // are already zero in the LHS. + for (; i != ThisWords; ++i) + Bits[i] = 0; + + return *this; + } + + BitVector &operator|=(const BitVector &RHS) { + assert(Size == RHS.Size && "Illegal operation!"); + for (unsigned i = 0; i < NumBitWords(size()); ++i) + Bits[i] |= RHS.Bits[i]; + return *this; + } + + BitVector &operator^=(const BitVector &RHS) { + assert(Size == RHS.Size && "Illegal operation!"); + for (unsigned i = 0; i < NumBitWords(size()); ++i) + Bits[i] ^= RHS.Bits[i]; + return *this; + } + + // Assignment operator. + const BitVector &operator=(const BitVector &RHS) { + if (this == &RHS) return *this; + + Size = RHS.size(); + unsigned RHSWords = NumBitWords(Size); + if (Size <= Capacity * BITWORD_SIZE) { + std::copy(RHS.Bits, &RHS.Bits[RHSWords], Bits); + clear_unused_bits(); + return *this; + } + + // Grow the bitvector to have enough elements. + Capacity = RHSWords; + BitWord *NewBits = new BitWord[Capacity]; + std::copy(RHS.Bits, &RHS.Bits[RHSWords], NewBits); + + // Destroy the old bits. + delete[] Bits; + Bits = NewBits; + + return *this; + } + +private: + unsigned NumBitWords(unsigned S) const { + return (S + BITWORD_SIZE-1) / BITWORD_SIZE; + } + + // Set the unused bits in the high words. + void set_unused_bits(bool t = true) { + // Set high words first. + unsigned UsedWords = NumBitWords(Size); + if (Capacity > UsedWords) + init_words(&Bits[UsedWords], (Capacity-UsedWords), t); + + // Then set any stray high bits of the last used word. + unsigned ExtraBits = Size % BITWORD_SIZE; + if (ExtraBits) { + Bits[UsedWords-1] &= ~(~0L << ExtraBits); + Bits[UsedWords-1] |= (0 - (BitWord)t) << ExtraBits; + } + } + + // Clear the unused bits in the high words. + void clear_unused_bits() { + set_unused_bits(false); + } + + void grow(unsigned NewSize) { + unsigned OldCapacity = Capacity; + Capacity = NumBitWords(NewSize); + BitWord *NewBits = new BitWord[Capacity]; + + // Copy the old bits over. + if (OldCapacity != 0) + std::copy(Bits, &Bits[OldCapacity], NewBits); + + // Destroy the old bits. 
+ delete[] Bits; + Bits = NewBits; + + clear_unused_bits(); + } + + void init_words(BitWord *B, unsigned NumWords, bool t) { + memset(B, 0 - (int)t, NumWords*sizeof(BitWord)); + } +}; + +inline BitVector operator&(const BitVector &LHS, const BitVector &RHS) { + BitVector Result(LHS); + Result &= RHS; + return Result; +} + +inline BitVector operator|(const BitVector &LHS, const BitVector &RHS) { + BitVector Result(LHS); + Result |= RHS; + return Result; +} + +inline BitVector operator^(const BitVector &LHS, const BitVector &RHS) { + BitVector Result(LHS); + Result ^= RHS; + return Result; +} + +} // End llvm namespace +#endif diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h new file mode 100644 index 0000000..e18be89 --- /dev/null +++ b/include/llvm/ADT/DenseMap.h @@ -0,0 +1,570 @@ +//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the DenseMap class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DENSEMAP_H +#define LLVM_ADT_DENSEMAP_H + +#include "llvm/Support/PointerLikeTypeTraits.h" +#include "llvm/Support/MathExtras.h" +#include <cassert> +#include <utility> +#include <new> + +namespace llvm { + +template<typename T> +struct DenseMapInfo { + //static inline T getEmptyKey(); + //static inline T getTombstoneKey(); + //static unsigned getHashValue(const T &Val); + //static bool isEqual(const T &LHS, const T &RHS); + //static bool isPod() +}; + +// Provide DenseMapInfo for all pointers. +template<typename T> +struct DenseMapInfo<T*> { + static inline T* getEmptyKey() { + intptr_t Val = -1; + Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable; + return reinterpret_cast<T*>(Val); + } + static inline T* getTombstoneKey() { + intptr_t Val = -2; + Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable; + return reinterpret_cast<T*>(Val); + } + static unsigned getHashValue(const T *PtrVal) { + return (unsigned((uintptr_t)PtrVal) >> 4) ^ + (unsigned((uintptr_t)PtrVal) >> 9); + } + static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; } + static bool isPod() { return true; } +}; + +// Provide DenseMapInfo for chars. +template<> struct DenseMapInfo<char> { + static inline char getEmptyKey() { return ~0; } + static inline char getTombstoneKey() { return ~0 - 1; } + static unsigned getHashValue(const char& Val) { return Val * 37; } + static bool isPod() { return true; } + static bool isEqual(const char &LHS, const char &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned ints. +template<> struct DenseMapInfo<unsigned> { + static inline unsigned getEmptyKey() { return ~0; } + static inline unsigned getTombstoneKey() { return ~0 - 1; } + static unsigned getHashValue(const unsigned& Val) { return Val * 37; } + static bool isPod() { return true; } + static bool isEqual(const unsigned& LHS, const unsigned& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned longs. 
+template<> struct DenseMapInfo<unsigned long> { + static inline unsigned long getEmptyKey() { return ~0L; } + static inline unsigned long getTombstoneKey() { return ~0L - 1L; } + static unsigned getHashValue(const unsigned long& Val) { + return (unsigned)(Val * 37L); + } + static bool isPod() { return true; } + static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for all pairs whose members have info. +template<typename T, typename U> +struct DenseMapInfo<std::pair<T, U> > { + typedef std::pair<T, U> Pair; + typedef DenseMapInfo<T> FirstInfo; + typedef DenseMapInfo<U> SecondInfo; + + static inline Pair getEmptyKey() { + return std::make_pair(FirstInfo::getEmptyKey(), + SecondInfo::getEmptyKey()); + } + static inline Pair getTombstoneKey() { + return std::make_pair(FirstInfo::getTombstoneKey(), + SecondInfo::getEmptyKey()); + } + static unsigned getHashValue(const Pair& PairVal) { + uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32 + | (uint64_t)SecondInfo::getHashValue(PairVal.second); + key += ~(key << 32); + key ^= (key >> 22); + key += ~(key << 13); + key ^= (key >> 8); + key += (key << 3); + key ^= (key >> 15); + key += ~(key << 27); + key ^= (key >> 31); + return (unsigned)key; + } + static bool isEqual(const Pair& LHS, const Pair& RHS) { return LHS == RHS; } + static bool isPod() { return FirstInfo::isPod() && SecondInfo::isPod(); } +}; + +template<typename KeyT, typename ValueT, + typename KeyInfoT = DenseMapInfo<KeyT>, + typename ValueInfoT = DenseMapInfo<ValueT> > +class DenseMapIterator; +template<typename KeyT, typename ValueT, + typename KeyInfoT = DenseMapInfo<KeyT>, + typename ValueInfoT = DenseMapInfo<ValueT> > +class DenseMapConstIterator; + +template<typename KeyT, typename ValueT, + typename KeyInfoT = DenseMapInfo<KeyT>, + typename ValueInfoT = DenseMapInfo<ValueT> > +class DenseMap { + typedef std::pair<KeyT, ValueT> BucketT; + unsigned NumBuckets; + BucketT *Buckets; + + unsigned NumEntries; + unsigned NumTombstones; +public: + typedef KeyT key_type; + typedef ValueT mapped_type; + typedef BucketT value_type; + + DenseMap(const DenseMap& other) { + NumBuckets = 0; + CopyFrom(other); + } + + explicit DenseMap(unsigned NumInitBuckets = 64) { + init(NumInitBuckets); + } + + ~DenseMap() { + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) { + if (!KeyInfoT::isEqual(P->first, EmptyKey) && + !KeyInfoT::isEqual(P->first, TombstoneKey)) + P->second.~ValueT(); + P->first.~KeyT(); + } + operator delete(Buckets); + } + + typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator; + typedef DenseMapConstIterator<KeyT, ValueT, KeyInfoT> const_iterator; + inline iterator begin() { + return iterator(Buckets, Buckets+NumBuckets); + } + inline iterator end() { + return iterator(Buckets+NumBuckets, Buckets+NumBuckets); + } + inline const_iterator begin() const { + return const_iterator(Buckets, Buckets+NumBuckets); + } + inline const_iterator end() const { + return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets); + } + + bool empty() const { return NumEntries == 0; } + unsigned size() const { return NumEntries; } + + /// Grow the densemap so that it has at least Size buckets. Does not shrink + void resize(size_t Size) { grow(Size); } + + void clear() { + // If the capacity of the array is huge, and the # elements used is small, + // shrink the array. 
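+    // Concretely: fewer than a quarter of the buckets are in use and there
+    // are more than 64 buckets.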
+ if (NumEntries * 4 < NumBuckets && NumBuckets > 64) { + shrink_and_clear(); + return; + } + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) { + if (!KeyInfoT::isEqual(P->first, EmptyKey)) { + if (!KeyInfoT::isEqual(P->first, TombstoneKey)) { + P->second.~ValueT(); + --NumEntries; + } + P->first = EmptyKey; + } + } + assert(NumEntries == 0 && "Node count imbalance!"); + NumTombstones = 0; + } + + /// count - Return true if the specified key is in the map. + bool count(const KeyT &Val) const { + BucketT *TheBucket; + return LookupBucketFor(Val, TheBucket); + } + + iterator find(const KeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return iterator(TheBucket, Buckets+NumBuckets); + return end(); + } + const_iterator find(const KeyT &Val) const { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return const_iterator(TheBucket, Buckets+NumBuckets); + return end(); + } + + /// lookup - Return the entry for the specified key, or a default + /// constructed value if no such entry exists. + ValueT lookup(const KeyT &Val) const { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return TheBucket->second; + return ValueT(); + } + + std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) { + BucketT *TheBucket; + if (LookupBucketFor(KV.first, TheBucket)) + return std::make_pair(iterator(TheBucket, Buckets+NumBuckets), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket); + return std::make_pair(iterator(TheBucket, Buckets+NumBuckets), + true); + } + + /// insert - Range insertion of pairs. + template<typename InputIt> + void insert(InputIt I, InputIt E) { + for (; I != E; ++I) + insert(*I); + } + + + bool erase(const KeyT &Val) { + BucketT *TheBucket; + if (!LookupBucketFor(Val, TheBucket)) + return false; // not in map. + + TheBucket->second.~ValueT(); + TheBucket->first = getTombstoneKey(); + --NumEntries; + ++NumTombstones; + return true; + } + bool erase(iterator I) { + BucketT *TheBucket = &*I; + TheBucket->second.~ValueT(); + TheBucket->first = getTombstoneKey(); + --NumEntries; + ++NumTombstones; + return true; + } + + value_type& FindAndConstruct(const KeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(Key, ValueT(), TheBucket); + } + + ValueT &operator[](const KeyT &Key) { + return FindAndConstruct(Key).second; + } + + DenseMap& operator=(const DenseMap& other) { + CopyFrom(other); + return *this; + } + + /// isPointerIntoBucketsArray - Return true if the specified pointer points + /// somewhere into the DenseMap's array of buckets (i.e. either to a key or + /// value in the DenseMap). + bool isPointerIntoBucketsArray(const void *Ptr) const { + return Ptr >= Buckets && Ptr < Buckets+NumBuckets; + } + + /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets + /// array. In conjunction with the previous method, this can be used to + /// determine whether an insertion caused the DenseMap to reallocate. 
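+  /// Capture this pointer before an insertion; if it changes afterwards, the
+  /// bucket array was reallocated and any outstanding iterators are invalid.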
+  const void *getPointerIntoBucketsArray() const { return Buckets; }
+
+private:
+  void CopyFrom(const DenseMap& other) {
+    if (NumBuckets != 0 && (!KeyInfoT::isPod() || !ValueInfoT::isPod())) {
+      const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+      for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
+        if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+            !KeyInfoT::isEqual(P->first, TombstoneKey))
+          P->second.~ValueT();
+        P->first.~KeyT();
+      }
+    }
+
+    NumEntries = other.NumEntries;
+    NumTombstones = other.NumTombstones;
+
+    if (NumBuckets)
+      operator delete(Buckets);
+    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) *
+                                                 other.NumBuckets));
+
+    if (KeyInfoT::isPod() && ValueInfoT::isPod())
+      memcpy(Buckets, other.Buckets, other.NumBuckets * sizeof(BucketT));
+    else
+      for (size_t i = 0; i < other.NumBuckets; ++i) {
+        new (&Buckets[i].first) KeyT(other.Buckets[i].first);
+        if (!KeyInfoT::isEqual(Buckets[i].first, getEmptyKey()) &&
+            !KeyInfoT::isEqual(Buckets[i].first, getTombstoneKey()))
+          new (&Buckets[i].second) ValueT(other.Buckets[i].second);
+      }
+    NumBuckets = other.NumBuckets;
+  }
+
+  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
+                            BucketT *TheBucket) {
+    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+    // the buckets are empty (meaning that many are filled with tombstones),
+    // grow the table.
+    //
+    // The latter case is tricky. For example, if we had one empty bucket with
+    // tons of tombstones, failing lookups (e.g. for insertion) would have to
+    // probe almost the entire table until it found the empty bucket. If the
+    // table were completely filled with tombstones, no lookup would ever
+    // succeed, causing infinite loops in lookup.
+    ++NumEntries;
+    if (NumEntries*4 >= NumBuckets*3 ||
+        NumBuckets-(NumEntries+NumTombstones) < NumBuckets/8) {
+      this->grow(NumBuckets * 2);
+      LookupBucketFor(Key, TheBucket);
+    }
+
+    // If we are writing over a tombstone, remember this.
+    if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey()))
+      --NumTombstones;
+
+    TheBucket->first = Key;
+    new (&TheBucket->second) ValueT(Value);
+    return TheBucket;
+  }
+
+  static unsigned getHashValue(const KeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+  static const KeyT getEmptyKey() {
+    return KeyInfoT::getEmptyKey();
+  }
+  static const KeyT getTombstoneKey() {
+    return KeyInfoT::getTombstoneKey();
+  }
+
+  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+  /// FoundBucket. If the bucket contains the key and a value, this returns
+  /// true, otherwise it returns a bucket with an empty marker or tombstone and
+  /// returns false.
+  bool LookupBucketFor(const KeyT &Val, BucketT *&FoundBucket) const {
+    unsigned BucketNo = getHashValue(Val);
+    unsigned ProbeAmt = 1;
+    BucketT *BucketsPtr = Buckets;
+
+    // FoundTombstone - Keep track of whether we find a tombstone while probing.
+    BucketT *FoundTombstone = 0;
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
+           !KeyInfoT::isEqual(Val, TombstoneKey) &&
+           "Empty/Tombstone value shouldn't be inserted into map!");
+
+    while (1) {
+      BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1));
+      // Found Val's bucket? If so, return it.
+      if (KeyInfoT::isEqual(ThisBucket->first, Val)) {
+        FoundBucket = ThisBucket;
+        return true;
+      }
+
+      // If we found an empty bucket, the key doesn't exist in the map. Return
+      // the bucket that the caller should insert into.
+ if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) { + // If we've already seen a tombstone while probing, fill it in instead + // of the empty bucket we eventually probed to. + if (FoundTombstone) ThisBucket = FoundTombstone; + FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; + return false; + } + + // If this is a tombstone, remember it. If Val ends up not in the map, we + // prefer to return it than something that would require more probing. + if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone) + FoundTombstone = ThisBucket; // Remember the first tombstone found. + + // Otherwise, it's a hash collision or a tombstone, continue quadratic + // probing. + BucketNo += ProbeAmt++; + } + } + + void init(unsigned InitBuckets) { + NumEntries = 0; + NumTombstones = 0; + NumBuckets = InitBuckets; + assert(InitBuckets && (InitBuckets & (InitBuckets-1)) == 0 && + "# initial buckets must be a power of two!"); + Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*InitBuckets)); + // Initialize all the keys to EmptyKey. + const KeyT EmptyKey = getEmptyKey(); + for (unsigned i = 0; i != InitBuckets; ++i) + new (&Buckets[i].first) KeyT(EmptyKey); + } + + void grow(unsigned AtLeast) { + unsigned OldNumBuckets = NumBuckets; + BucketT *OldBuckets = Buckets; + + // Double the number of buckets. + while (NumBuckets <= AtLeast) + NumBuckets <<= 1; + NumTombstones = 0; + Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets)); + + // Initialize all the keys to EmptyKey. + const KeyT EmptyKey = getEmptyKey(); + for (unsigned i = 0, e = NumBuckets; i != e; ++i) + new (&Buckets[i].first) KeyT(EmptyKey); + + // Insert all the old elements. + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) { + if (!KeyInfoT::isEqual(B->first, EmptyKey) && + !KeyInfoT::isEqual(B->first, TombstoneKey)) { + // Insert the key/value into the new table. + BucketT *DestBucket; + bool FoundVal = LookupBucketFor(B->first, DestBucket); + FoundVal = FoundVal; // silence warning. + assert(!FoundVal && "Key already in new map?"); + DestBucket->first = B->first; + new (&DestBucket->second) ValueT(B->second); + + // Free the value. + B->second.~ValueT(); + } + B->first.~KeyT(); + } + + // Free the old table. + operator delete(OldBuckets); + } + + void shrink_and_clear() { + unsigned OldNumBuckets = NumBuckets; + BucketT *OldBuckets = Buckets; + + // Reduce the number of buckets. + NumBuckets = NumEntries > 32 ? 1 << (Log2_32_Ceil(NumEntries) + 1) + : 64; + NumTombstones = 0; + Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets)); + + // Initialize all the keys to EmptyKey. + const KeyT EmptyKey = getEmptyKey(); + for (unsigned i = 0, e = NumBuckets; i != e; ++i) + new (&Buckets[i].first) KeyT(EmptyKey); + + // Free the old buckets. + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) { + if (!KeyInfoT::isEqual(B->first, EmptyKey) && + !KeyInfoT::isEqual(B->first, TombstoneKey)) { + // Free the value. + B->second.~ValueT(); + } + B->first.~KeyT(); + } + + // Free the old table. 
+ operator delete(OldBuckets); + + NumEntries = 0; + } +}; + +template<typename KeyT, typename ValueT, typename KeyInfoT, typename ValueInfoT> +class DenseMapIterator { + typedef std::pair<KeyT, ValueT> BucketT; +protected: + const BucketT *Ptr, *End; +public: + DenseMapIterator(void) : Ptr(0), End(0) {} + + DenseMapIterator(const BucketT *Pos, const BucketT *E) : Ptr(Pos), End(E) { + AdvancePastEmptyBuckets(); + } + + std::pair<KeyT, ValueT> &operator*() const { + return *const_cast<BucketT*>(Ptr); + } + std::pair<KeyT, ValueT> *operator->() const { + return const_cast<BucketT*>(Ptr); + } + + bool operator==(const DenseMapIterator &RHS) const { + return Ptr == RHS.Ptr; + } + bool operator!=(const DenseMapIterator &RHS) const { + return Ptr != RHS.Ptr; + } + + inline DenseMapIterator& operator++() { // Preincrement + ++Ptr; + AdvancePastEmptyBuckets(); + return *this; + } + DenseMapIterator operator++(int) { // Postincrement + DenseMapIterator tmp = *this; ++*this; return tmp; + } + +private: + void AdvancePastEmptyBuckets() { + const KeyT Empty = KeyInfoT::getEmptyKey(); + const KeyT Tombstone = KeyInfoT::getTombstoneKey(); + + while (Ptr != End && + (KeyInfoT::isEqual(Ptr->first, Empty) || + KeyInfoT::isEqual(Ptr->first, Tombstone))) + ++Ptr; + } +}; + +template<typename KeyT, typename ValueT, typename KeyInfoT, typename ValueInfoT> +class DenseMapConstIterator : public DenseMapIterator<KeyT, ValueT, KeyInfoT> { +public: + DenseMapConstIterator(void) : DenseMapIterator<KeyT, ValueT, KeyInfoT>() {} + DenseMapConstIterator(const std::pair<KeyT, ValueT> *Pos, + const std::pair<KeyT, ValueT> *E) + : DenseMapIterator<KeyT, ValueT, KeyInfoT>(Pos, E) { + } + const std::pair<KeyT, ValueT> &operator*() const { + return *this->Ptr; + } + const std::pair<KeyT, ValueT> *operator->() const { + return this->Ptr; + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/DenseSet.h b/include/llvm/ADT/DenseSet.h new file mode 100644 index 0000000..ce7344b --- /dev/null +++ b/include/llvm/ADT/DenseSet.h @@ -0,0 +1,104 @@ +//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the DenseSet class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DENSESET_H +#define LLVM_ADT_DENSESET_H + +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + +/// DenseSet - This implements a dense probed hash-table based set. +/// +/// FIXME: This is currently implemented directly in terms of DenseMap, this +/// should be optimized later if there is a need. +template<typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT> > +class DenseSet { + typedef DenseMap<ValueT, char, ValueInfoT> MapTy; + MapTy TheMap; +public: + DenseSet(const DenseSet &Other) : TheMap(Other.TheMap) {} + explicit DenseSet(unsigned NumInitBuckets = 64) : TheMap(NumInitBuckets) {} + + bool empty() const { return TheMap.empty(); } + unsigned size() const { return TheMap.size(); } + + void clear() { + TheMap.clear(); + } + + bool count(const ValueT &V) const { + return TheMap.count(V); + } + + void erase(const ValueT &V) { + TheMap.erase(V); + } + + DenseSet &operator=(const DenseSet &RHS) { + TheMap = RHS.TheMap; + return *this; + } + + // Iterators. 
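+  // These wrap the underlying map's iterators and dereference to the stored
+  // value (the map's key), hiding the dummy 'char' payload.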
+ + class Iterator { + typename MapTy::iterator I; + public: + Iterator(const typename MapTy::iterator &i) : I(i) {} + + ValueT& operator*() { return I->first; } + ValueT* operator->() { return &I->first; } + + Iterator& operator++() { ++I; return *this; }; + bool operator==(const Iterator& X) const { return I == X.I; } + bool operator!=(const Iterator& X) const { return I != X.I; } + }; + + class ConstIterator { + typename MapTy::const_iterator I; + public: + ConstIterator(const typename MapTy::const_iterator &i) : I(i) {} + + const ValueT& operator*() { return I->first; } + const ValueT* operator->() { return &I->first; } + + ConstIterator& operator++() { ++I; return *this; }; + bool operator==(const ConstIterator& X) const { return I == X.I; } + bool operator!=(const ConstIterator& X) const { return I != X.I; } + }; + + typedef Iterator iterator; + typedef ConstIterator const_iterator; + + iterator begin() { return Iterator(TheMap.begin()); } + iterator end() { return Iterator(TheMap.end()); } + + const_iterator begin() const { return ConstIterator(TheMap.begin()); } + const_iterator end() const { return ConstIterator(TheMap.end()); } + + std::pair<iterator, bool> insert(const ValueT &V) { + return TheMap.insert(std::make_pair(V, 0)); + } + + // Range insertion of values. + template<typename InputIt> + void insert(InputIt I, InputIt E) { + for (; I != E; ++I) + insert(*I); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/DepthFirstIterator.h b/include/llvm/ADT/DepthFirstIterator.h new file mode 100644 index 0000000..517768f --- /dev/null +++ b/include/llvm/ADT/DepthFirstIterator.h @@ -0,0 +1,232 @@ +//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file builds on the ADT/GraphTraits.h file to build generic depth +// first graph iterator. This file exposes the following functions/types: +// +// df_begin/df_end/df_iterator +// * Normal depth-first iteration - visit a node and then all of its children. +// +// idf_begin/idf_end/idf_iterator +// * Depth-first iteration on the 'inverse' graph. +// +// df_ext_begin/df_ext_end/df_ext_iterator +// * Normal depth-first iteration - visit a node and then all of its children. +// This iterator stores the 'visited' set in an external set, which allows +// it to be more efficient, and allows external clients to use the set for +// other purposes. +// +// idf_ext_begin/idf_ext_end/idf_ext_iterator +// * Depth-first iteration on the 'inverse' graph. +// This iterator stores the 'visited' set in an external set, which allows +// it to be more efficient, and allows external clients to use the set for +// other purposes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H +#define LLVM_ADT_DEPTHFIRSTITERATOR_H + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/SmallPtrSet.h" +#include <set> +#include <vector> + +namespace llvm { + +// df_iterator_storage - A private class which is used to figure out where to +// store the visited set. 
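+// A brief usage sketch (illustrative only; Function, BasicBlock and their
+// GraphTraits specializations live elsewhere in LLVM and are assumed here):
+//
+//   Function *F = ...;
+//   for (df_iterator<Function*> I = df_begin(F), E = df_end(F); I != E; ++I)
+//     ...;  // *I is a BasicBlock* reachable from the entry block.
+//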
+template<class SetType, bool External> // Non-external set +class df_iterator_storage { +public: + SetType Visited; +}; + +template<class SetType> +class df_iterator_storage<SetType, true> { +public: + df_iterator_storage(SetType &VSet) : Visited(VSet) {} + df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {} + SetType &Visited; +}; + + +// Generic Depth First Iterator +template<class GraphT, +class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>, + bool ExtStorage = false, class GT = GraphTraits<GraphT> > +class df_iterator : public forward_iterator<typename GT::NodeType, ptrdiff_t>, + public df_iterator_storage<SetType, ExtStorage> { + typedef forward_iterator<typename GT::NodeType, ptrdiff_t> super; + + typedef typename GT::NodeType NodeType; + typedef typename GT::ChildIteratorType ChildItTy; + + // VisitStack - Used to maintain the ordering. Top = current block + // First element is node pointer, second is the 'next child' to visit + std::vector<std::pair<NodeType *, ChildItTy> > VisitStack; +private: + inline df_iterator(NodeType *Node) { + this->Visited.insert(Node); + VisitStack.push_back(std::make_pair(Node, GT::child_begin(Node))); + } + inline df_iterator() { /* End is when stack is empty */ } + + inline df_iterator(NodeType *Node, SetType &S) + : df_iterator_storage<SetType, ExtStorage>(S) { + if (!S.count(Node)) { + this->Visited.insert(Node); + VisitStack.push_back(std::make_pair(Node, GT::child_begin(Node))); + } + } + inline df_iterator(SetType &S) + : df_iterator_storage<SetType, ExtStorage>(S) { + // End is when stack is empty + } + +public: + typedef typename super::pointer pointer; + typedef df_iterator<GraphT, SetType, ExtStorage, GT> _Self; + + // Provide static begin and end methods as our public "constructors" + static inline _Self begin(const GraphT& G) { + return _Self(GT::getEntryNode(G)); + } + static inline _Self end(const GraphT& G) { return _Self(); } + + // Static begin and end methods as our public ctors for external iterators + static inline _Self begin(const GraphT& G, SetType &S) { + return _Self(GT::getEntryNode(G), S); + } + static inline _Self end(const GraphT& G, SetType &S) { return _Self(S); } + + inline bool operator==(const _Self& x) const { + return VisitStack.size() == x.VisitStack.size() && + VisitStack == x.VisitStack; + } + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline pointer operator*() const { + return VisitStack.back().first; + } + + // This is a nonstandard operator-> that dereferences the pointer an extra + // time... so that you can actually call methods ON the Node, because + // the contained type is a pointer. This allows BBIt->getTerminator() f.e. + // + inline NodeType *operator->() const { return operator*(); } + + inline _Self& operator++() { // Preincrement + do { + std::pair<NodeType *, ChildItTy> &Top = VisitStack.back(); + NodeType *Node = Top.first; + ChildItTy &It = Top.second; + + while (It != GT::child_end(Node)) { + NodeType *Next = *It++; + if (!this->Visited.count(Next)) { // Has our next sibling been visited? + // No, do it now. + this->Visited.insert(Next); + VisitStack.push_back(std::make_pair(Next, GT::child_begin(Next))); + return *this; + } + } + + // Oops, ran out of successors... go up a level on the stack. 
+ VisitStack.pop_back(); + } while (!VisitStack.empty()); + return *this; + } + + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } + + // nodeVisited - return true if this iterator has already visited the + // specified node. This is public, and will probably be used to iterate over + // nodes that a depth first iteration did not find: ie unreachable nodes. + // + inline bool nodeVisited(NodeType *Node) const { + return this->Visited.count(Node) != 0; + } +}; + + +// Provide global constructors that automatically figure out correct types... +// +template <class T> +df_iterator<T> df_begin(const T& G) { + return df_iterator<T>::begin(G); +} + +template <class T> +df_iterator<T> df_end(const T& G) { + return df_iterator<T>::end(G); +} + +// Provide global definitions of external depth first iterators... +template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> > +struct df_ext_iterator : public df_iterator<T, SetTy, true> { + df_ext_iterator(const df_iterator<T, SetTy, true> &V) + : df_iterator<T, SetTy, true>(V) {} +}; + +template <class T, class SetTy> +df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) { + return df_ext_iterator<T, SetTy>::begin(G, S); +} + +template <class T, class SetTy> +df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) { + return df_ext_iterator<T, SetTy>::end(G, S); +} + + +// Provide global definitions of inverse depth first iterators... +template <class T, + class SetTy = llvm::SmallPtrSet<typename GraphTraits<T>::NodeType*, 8>, + bool External = false> +struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> { + idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V) + : df_iterator<Inverse<T>, SetTy, External>(V) {} +}; + +template <class T> +idf_iterator<T> idf_begin(const T& G) { + return idf_iterator<T>::begin(Inverse<T>(G)); +} + +template <class T> +idf_iterator<T> idf_end(const T& G){ + return idf_iterator<T>::end(Inverse<T>(G)); +} + +// Provide global definitions of external inverse depth first iterators... +template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> > +struct idf_ext_iterator : public idf_iterator<T, SetTy, true> { + idf_ext_iterator(const idf_iterator<T, SetTy, true> &V) + : idf_iterator<T, SetTy, true>(V) {} + idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V) + : idf_iterator<T, SetTy, true>(V) {} +}; + +template <class T, class SetTy> +idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) { + return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S); +} + +template <class T, class SetTy> +idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) { + return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h new file mode 100644 index 0000000..6e00a21 --- /dev/null +++ b/include/llvm/ADT/EquivalenceClasses.h @@ -0,0 +1,279 @@ +//===-- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Generic implementation of equivalence classes through the use Tarjan's +// efficient union-find algorithm. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_EQUIVALENCECLASSES_H +#define LLVM_ADT_EQUIVALENCECLASSES_H + +#include "llvm/ADT/iterator.h" +#include "llvm/Support/DataTypes.h" +#include <set> + +namespace llvm { + +/// EquivalenceClasses - This represents a collection of equivalence classes and +/// supports three efficient operations: insert an element into a class of its +/// own, union two classes, and find the class for a given element. In +/// addition to these modification methods, it is possible to iterate over all +/// of the equivalence classes and all of the elements in a class. +/// +/// This implementation is an efficient implementation that only stores one copy +/// of the element being indexed per entry in the set, and allows any arbitrary +/// type to be indexed (as long as it can be ordered with operator<). +/// +/// Here is a simple example using integers: +/// +/// EquivalenceClasses<int> EC; +/// EC.unionSets(1, 2); // insert 1, 2 into the same set +/// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets +/// EC.unionSets(5, 1); // merge the set for 1 with 5's set. +/// +/// for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end(); +/// I != E; ++I) { // Iterate over all of the equivalence sets. +/// if (!I->isLeader()) continue; // Ignore non-leader sets. +/// for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I); +/// MI != EC.member_end(); ++MI) // Loop over members in this set. +/// cerr << *MI << " "; // Print member. +/// cerr << "\n"; // Finish set. +/// } +/// +/// This example prints: +/// 4 +/// 5 1 2 +/// +template <class ElemTy> +class EquivalenceClasses { + /// ECValue - The EquivalenceClasses data structure is just a set of these. + /// Each of these represents a relation for a value. First it stores the + /// value itself, which provides the ordering that the set queries. Next, it + /// provides a "next pointer", which is used to enumerate all of the elements + /// in the unioned set. Finally, it defines either a "end of list pointer" or + /// "leader pointer" depending on whether the value itself is a leader. A + /// "leader pointer" points to the node that is the leader for this element, + /// if the node is not a leader. A "end of list pointer" points to the last + /// node in the list of members of this list. Whether or not a node is a + /// leader is determined by a bit stolen from one of the pointers. + class ECValue { + friend class EquivalenceClasses; + mutable const ECValue *Leader, *Next; + ElemTy Data; + // ECValue ctor - Start out with EndOfList pointing to this node, Next is + // Null, isLeader = true. + ECValue(const ElemTy &Elt) + : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {} + + const ECValue *getLeader() const { + if (isLeader()) return this; + if (Leader->isLeader()) return Leader; + // Path compression. + return Leader = Leader->getLeader(); + } + const ECValue *getEndOfList() const { + assert(isLeader() && "Cannot get the end of a list for a non-leader!"); + return Leader; + } + + void setNext(const ECValue *NewNext) const { + assert(getNext() == 0 && "Already has a next pointer!"); + Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader()); + } + public: + ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1), + Data(RHS.Data) { + // Only support copying of singleton nodes. 
+ assert(RHS.isLeader() && RHS.getNext() == 0 && "Not a singleton!"); + } + + bool operator<(const ECValue &UFN) const { return Data < UFN.Data; } + + bool isLeader() const { return (intptr_t)Next & 1; } + const ElemTy &getData() const { return Data; } + + const ECValue *getNext() const { + return (ECValue*)((intptr_t)Next & ~(intptr_t)1); + } + + template<typename T> + bool operator<(const T &Val) const { return Data < Val; } + }; + + /// TheMapping - This implicitly provides a mapping from ElemTy values to the + /// ECValues, it just keeps the key as part of the value. + std::set<ECValue> TheMapping; + +public: + EquivalenceClasses() {} + EquivalenceClasses(const EquivalenceClasses &RHS) { + operator=(RHS); + } + + const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) { + TheMapping.clear(); + for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) + if (I->isLeader()) { + member_iterator MI = RHS.member_begin(I); + member_iterator LeaderIt = member_begin(insert(*MI)); + for (++MI; MI != member_end(); ++MI) + unionSets(LeaderIt, member_begin(insert(*MI))); + } + return *this; + } + + //===--------------------------------------------------------------------===// + // Inspection methods + // + + /// iterator* - Provides a way to iterate over all values in the set. + typedef typename std::set<ECValue>::const_iterator iterator; + iterator begin() const { return TheMapping.begin(); } + iterator end() const { return TheMapping.end(); } + + bool empty() const { return TheMapping.empty(); } + + /// member_* Iterate over the members of an equivalence class. + /// + class member_iterator; + member_iterator member_begin(iterator I) const { + // Only leaders provide anything to iterate over. + return member_iterator(I->isLeader() ? &*I : 0); + } + member_iterator member_end() const { + return member_iterator(0); + } + + /// findValue - Return an iterator to the specified value. If it does not + /// exist, end() is returned. + iterator findValue(const ElemTy &V) const { + return TheMapping.find(V); + } + + /// getLeaderValue - Return the leader for the specified value that is in the + /// set. It is an error to call this method for a value that is not yet in + /// the set. For that, call getOrInsertLeaderValue(V). + const ElemTy &getLeaderValue(const ElemTy &V) const { + member_iterator MI = findLeader(V); + assert(MI != member_end() && "Value is not in the set!"); + return *MI; + } + + /// getOrInsertLeaderValue - Return the leader for the specified value that is + /// in the set. If the member is not in the set, it is inserted, then + /// returned. + const ElemTy &getOrInsertLeaderValue(const ElemTy &V) const { + member_iterator MI = findLeader(insert(V)); + assert(MI != member_end() && "Value is not in the set!"); + return *MI; + } + + /// getNumClasses - Return the number of equivalence classes in this set. + /// Note that this is a linear time operation. + unsigned getNumClasses() const { + unsigned NC = 0; + for (iterator I = begin(), E = end(); I != E; ++I) + if (I->isLeader()) ++NC; + return NC; + } + + + //===--------------------------------------------------------------------===// + // Mutation methods + + /// insert - Insert a new value into the union/find set, ignoring the request + /// if the value already exists. + iterator insert(const ElemTy &Data) { + return TheMapping.insert(Data).first; + } + + /// findLeader - Given a value in the set, return a member iterator for the + /// equivalence class it is in. 
This does the path-compression part that + /// makes union-find "union findy". This returns an end iterator if the value + /// is not in the equivalence class. + /// + member_iterator findLeader(iterator I) const { + if (I == TheMapping.end()) return member_end(); + return member_iterator(I->getLeader()); + } + member_iterator findLeader(const ElemTy &V) const { + return findLeader(TheMapping.find(V)); + } + + + /// union - Merge the two equivalence sets for the specified values, inserting + /// them if they do not already exist in the equivalence set. + member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) { + iterator V1I = insert(V1), V2I = insert(V2); + return unionSets(findLeader(V1I), findLeader(V2I)); + } + member_iterator unionSets(member_iterator L1, member_iterator L2) { + assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!"); + if (L1 == L2) return L1; // Unifying the same two sets, noop. + + // Otherwise, this is a real union operation. Set the end of the L1 list to + // point to the L2 leader node. + const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node; + L1LV.getEndOfList()->setNext(&L2LV); + + // Update L1LV's end of list pointer. + L1LV.Leader = L2LV.getEndOfList(); + + // Clear L2's leader flag: + L2LV.Next = L2LV.getNext(); + + // L2's leader is now L1. + L2LV.Leader = &L1LV; + return L1; + } + + class member_iterator : public forward_iterator<ElemTy, ptrdiff_t> { + typedef forward_iterator<const ElemTy, ptrdiff_t> super; + const ECValue *Node; + friend class EquivalenceClasses; + public: + typedef size_t size_type; + typedef typename super::pointer pointer; + typedef typename super::reference reference; + + explicit member_iterator() {} + explicit member_iterator(const ECValue *N) : Node(N) {} + member_iterator(const member_iterator &I) : Node(I.Node) {} + + reference operator*() const { + assert(Node != 0 && "Dereferencing end()!"); + return Node->getData(); + } + reference operator->() const { return operator*(); } + + member_iterator &operator++() { + assert(Node != 0 && "++'d off the end of the list!"); + Node = Node->getNext(); + return *this; + } + + member_iterator operator++(int) { // postincrement operators. + member_iterator tmp = *this; + ++*this; + return tmp; + } + + bool operator==(const member_iterator &RHS) const { + return Node == RHS.Node; + } + bool operator!=(const member_iterator &RHS) const { + return Node != RHS.Node; + } + }; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h new file mode 100644 index 0000000..e31e112 --- /dev/null +++ b/include/llvm/ADT/FoldingSet.h @@ -0,0 +1,461 @@ +//===-- llvm/ADT/FoldingSet.h - Uniquing Hash Set ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a hash set that can be used to remove duplication of nodes +// in a graph. This code was originally created by Chris Lattner for use with +// SelectionDAGCSEMap, but was isolated to provide use across the llvm code set. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FOLDINGSET_H
+#define LLVM_ADT_FOLDINGSET_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/SmallVector.h"
+#include <string>
+#include <iterator>
+
+namespace llvm {
+  class APFloat;
+  class APInt;
+
+/// This folding set is used for two purposes:
+///   1. Given information about a node we want to create, look up the unique
+///      instance of the node in the set.  If the node already exists, return
+///      it, otherwise return the bucket it should be inserted into.
+///   2. Given a node that has already been created, remove it from the set.
+///
+/// This class is implemented as a single-link chained hash table, where the
+/// "buckets" are actually the nodes themselves (the next pointer is in the
+/// node).  The last node points back to the bucket to simplify node removal.
+///
+/// Any node that is to be included in the folding set must be a subclass of
+/// FoldingSetNode.  The node class must also define a Profile method used to
+/// establish the unique bits of data for the node.  The Profile method is
+/// passed a FoldingSetNodeID object which is used to gather the bits.  Just
+/// call one of the Add* functions defined in the FoldingSetNodeID class.
+/// Note that the folding set does not own the nodes; it is the
+/// responsibility of the user to dispose of them.
+///
+/// Eg.
+///    class MyNode : public FoldingSetNode {
+///    private:
+///      std::string Name;
+///      unsigned Value;
+///    public:
+///      MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
+///      ...
+///      void Profile(FoldingSetNodeID &ID) {
+///        ID.AddString(Name);
+///        ID.AddInteger(Value);
+///      }
+///      ...
+///    };
+///
+/// To define the folding set itself use the FoldingSet template:
+///
+/// Eg.
+///    FoldingSet<MyNode> MyFoldingSet;
+///
+/// Four public methods are available to manipulate the folding set:
+///
+/// 1) If you have an existing node that you want to add to the set but are not
+/// sure whether it is already present, then call:
+///
+///    MyNode *M = MyFoldingSet.GetOrInsertNode(N);
+///
+/// If the result is equal to the input then the node has been inserted.
+/// Otherwise, the result is the node already in the folding set, and the
+/// input can be discarded (use the result instead).
+///
+/// 2) If you are ready to construct a node but want to check if it already
+/// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
+/// check:
+///
+///    FoldingSetNodeID ID;
+///    ID.AddString(Name);
+///    ID.AddInteger(Value);
+///    void *InsertPoint;
+///
+///    MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
+///
+/// If found then M will be non-NULL, else InsertPoint will point to where the
+/// new node should be inserted using InsertNode.
+///
+/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert the
+/// newly constructed node with InsertNode:
+///
+///    InsertNode(N, InsertPoint);
+///
+/// 4) Finally, if you want to remove a node from the folding set call:
+///
+///    bool WasRemoved = RemoveNode(N);
+///
+/// The result indicates whether the node existed in the folding set.
+
+class FoldingSetNodeID;
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetImpl - Implements the folding set functionality.  The main
+/// structure is an array of buckets.  Each bucket is indexed by the hash of
+/// the nodes it contains.  The bucket itself points to the nodes contained
+/// in the bucket via a singly linked list.
The last node in the list points +/// back to the bucket to facilitate node removal. +/// +class FoldingSetImpl { +protected: + /// Buckets - Array of bucket chains. + /// + void **Buckets; + + /// NumBuckets - Length of the Buckets array. Always a power of 2. + /// + unsigned NumBuckets; + + /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes + /// is greater than twice the number of buckets. + unsigned NumNodes; + +public: + explicit FoldingSetImpl(unsigned Log2InitSize = 6); + virtual ~FoldingSetImpl(); + + //===--------------------------------------------------------------------===// + /// Node - This class is used to maintain the singly linked bucket list in + /// a folding set. + /// + class Node { + private: + // NextInFoldingSetBucket - next link in the bucket list. + void *NextInFoldingSetBucket; + + public: + + Node() : NextInFoldingSetBucket(0) {} + + // Accessors + void *getNextInBucket() const { return NextInFoldingSetBucket; } + void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; } + }; + + /// clear - Remove all nodes from the folding set. + void clear(); + + /// RemoveNode - Remove a node from the folding set, returning true if one + /// was removed or false if the node was not in the folding set. + bool RemoveNode(Node *N); + + /// GetOrInsertNode - If there is an existing simple Node exactly + /// equal to the specified node, return it. Otherwise, insert 'N' and return + /// it instead. + Node *GetOrInsertNode(Node *N); + + /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, + /// return it. If not, return the insertion token that will make insertion + /// faster. + Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos); + + /// InsertNode - Insert the specified node into the folding set, knowing that + /// it is not already in the folding set. InsertPos must be obtained from + /// FindNodeOrInsertPos. + void InsertNode(Node *N, void *InsertPos); + + /// size - Returns the number of nodes in the folding set. + unsigned size() const { return NumNodes; } + + /// empty - Returns true if there are no nodes in the folding set. + bool empty() const { return NumNodes == 0; } + +private: + + /// GrowHashTable - Double the size of the hash table and rehash everything. + /// + void GrowHashTable(); + +protected: + + /// GetNodeProfile - Instantiations of the FoldingSet template implement + /// this function to gather data bits for the given node. + virtual void GetNodeProfile(FoldingSetNodeID &ID, Node *N) const = 0; +}; + +//===----------------------------------------------------------------------===// +/// FoldingSetTrait - This trait class is used to define behavior of how +/// to "profile" (in the FoldingSet parlance) an object of a given type. +/// The default behavior is to invoke a 'Profile' method on an object, but +/// through template specialization the behavior can be tailored for specific +/// types. Combined with the FoldingSetNodeWrapper classs, one can add objects +/// to FoldingSets that were not originally designed to have that behavior. +/// +template<typename T> struct FoldingSetTrait { + static inline void Profile(const T& X, FoldingSetNodeID& ID) { X.Profile(ID);} + static inline void Profile(T& X, FoldingSetNodeID& ID) { X.Profile(ID); } +}; + +//===--------------------------------------------------------------------===// +/// FoldingSetNodeID - This class is used to gather all the unique data bits of +/// a node. 
When all the bits are gathered this class is used to produce a +/// hash value for the node. +/// +class FoldingSetNodeID { + /// Bits - Vector of all the data bits that make the node unique. + /// Use a SmallVector to avoid a heap allocation in the common case. + SmallVector<unsigned, 32> Bits; + +public: + FoldingSetNodeID() {} + + /// getRawData - Return the ith entry in the Bits data. + /// + unsigned getRawData(unsigned i) const { + return Bits[i]; + } + + /// Add* - Add various data types to Bit data. + /// + void AddPointer(const void *Ptr); + void AddInteger(signed I); + void AddInteger(unsigned I); + void AddInteger(long I); + void AddInteger(unsigned long I); + void AddInteger(long long I); + void AddInteger(unsigned long long I); + void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); } + void AddString(const char* String, const char* End); + void AddString(const std::string &String); + void AddString(const char* String); + + template <typename T> + inline void Add(const T& x) { FoldingSetTrait<T>::Profile(x, *this); } + + /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID + /// object to be used to compute a new profile. + inline void clear() { Bits.clear(); } + + /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used + /// to lookup the node in the FoldingSetImpl. + unsigned ComputeHash() const; + + /// operator== - Used to compare two nodes to each other. + /// + bool operator==(const FoldingSetNodeID &RHS) const; +}; + +// Convenience type to hide the implementation of the folding set. +typedef FoldingSetImpl::Node FoldingSetNode; +template<class T> class FoldingSetIterator; +template<class T> class FoldingSetBucketIterator; + +//===----------------------------------------------------------------------===// +/// FoldingSet - This template class is used to instantiate a specialized +/// implementation of the folding set to the node class T. T must be a +/// subclass of FoldingSetNode and implement a Profile function. +/// +template<class T> class FoldingSet : public FoldingSetImpl { +private: + /// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a + /// way to convert nodes into a unique specifier. + virtual void GetNodeProfile(FoldingSetNodeID &ID, Node *N) const { + T *TN = static_cast<T *>(N); + FoldingSetTrait<T>::Profile(*TN,ID); + } + +public: + explicit FoldingSet(unsigned Log2InitSize = 6) + : FoldingSetImpl(Log2InitSize) + {} + + typedef FoldingSetIterator<T> iterator; + iterator begin() { return iterator(Buckets); } + iterator end() { return iterator(Buckets+NumBuckets); } + + typedef FoldingSetIterator<const T> const_iterator; + const_iterator begin() const { return const_iterator(Buckets); } + const_iterator end() const { return const_iterator(Buckets+NumBuckets); } + + typedef FoldingSetBucketIterator<T> bucket_iterator; + + bucket_iterator bucket_begin(unsigned hash) { + return bucket_iterator(Buckets + (hash & (NumBuckets-1))); + } + + bucket_iterator bucket_end(unsigned hash) { + return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true); + } + + /// GetOrInsertNode - If there is an existing simple Node exactly + /// equal to the specified node, return it. Otherwise, insert 'N' and + /// return it instead. + T *GetOrInsertNode(Node *N) { + return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N)); + } + + /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists, + /// return it. If not, return the insertion token that will make insertion + /// faster. 
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) { + return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos)); + } +}; + +//===----------------------------------------------------------------------===// +/// FoldingSetIteratorImpl - This is the common iterator support shared by all +/// folding sets, which knows how to walk the folding set hash table. +class FoldingSetIteratorImpl { +protected: + FoldingSetNode *NodePtr; + FoldingSetIteratorImpl(void **Bucket); + void advance(); + +public: + bool operator==(const FoldingSetIteratorImpl &RHS) const { + return NodePtr == RHS.NodePtr; + } + bool operator!=(const FoldingSetIteratorImpl &RHS) const { + return NodePtr != RHS.NodePtr; + } +}; + + +template<class T> +class FoldingSetIterator : public FoldingSetIteratorImpl { +public: + explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {} + + T &operator*() const { + return *static_cast<T*>(NodePtr); + } + + T *operator->() const { + return static_cast<T*>(NodePtr); + } + + inline FoldingSetIterator& operator++() { // Preincrement + advance(); + return *this; + } + FoldingSetIterator operator++(int) { // Postincrement + FoldingSetIterator tmp = *this; ++*this; return tmp; + } +}; + +//===----------------------------------------------------------------------===// +/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support +/// shared by all folding sets, which knows how to walk a particular bucket +/// of a folding set hash table. + +class FoldingSetBucketIteratorImpl { +protected: + void *Ptr; + + explicit FoldingSetBucketIteratorImpl(void **Bucket); + + FoldingSetBucketIteratorImpl(void **Bucket, bool) + : Ptr(Bucket) {} + + void advance() { + void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket(); + uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1; + Ptr = reinterpret_cast<void*>(x); + } + +public: + bool operator==(const FoldingSetBucketIteratorImpl &RHS) const { + return Ptr == RHS.Ptr; + } + bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const { + return Ptr != RHS.Ptr; + } +}; + + +template<class T> +class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl { +public: + explicit FoldingSetBucketIterator(void **Bucket) : + FoldingSetBucketIteratorImpl(Bucket) {} + + FoldingSetBucketIterator(void **Bucket, bool) : + FoldingSetBucketIteratorImpl(Bucket, true) {} + + T& operator*() const { return *static_cast<T*>(Ptr); } + T* operator->() const { return static_cast<T*>(Ptr); } + + inline FoldingSetBucketIterator& operator++() { // Preincrement + advance(); + return *this; + } + FoldingSetBucketIterator operator++(int) { // Postincrement + FoldingSetBucketIterator tmp = *this; ++*this; return tmp; + } +}; + +//===----------------------------------------------------------------------===// +/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary +/// types in an enclosing object so that they can be inserted into FoldingSets. 
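+/// A usage sketch (illustrative only; MyData and a FoldingSetTrait<MyData>
+/// specialization are assumptions, not provided by this header):
+///
+///    FoldingSet<FoldingSetNodeWrapper<MyData> > Cache;
+///    FoldingSetNodeWrapper<MyData> *N = new FoldingSetNodeWrapper<MyData>(D);
+///    FoldingSetNodeWrapper<MyData> *Existing = Cache.GetOrInsertNode(N);
+///    if (Existing != N)
+///      delete N;  // An equal wrapper was already present; reuse Existing.
+///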
+template <typename T> +class FoldingSetNodeWrapper : public FoldingSetNode { + T data; +public: + explicit FoldingSetNodeWrapper(const T& x) : data(x) {} + virtual ~FoldingSetNodeWrapper() {} + + template<typename A1> + explicit FoldingSetNodeWrapper(const A1& a1) + : data(a1) {} + + template <typename A1, typename A2> + explicit FoldingSetNodeWrapper(const A1& a1, const A2& a2) + : data(a1,a2) {} + + template <typename A1, typename A2, typename A3> + explicit FoldingSetNodeWrapper(const A1& a1, const A2& a2, const A3& a3) + : data(a1,a2,a3) {} + + template <typename A1, typename A2, typename A3, typename A4> + explicit FoldingSetNodeWrapper(const A1& a1, const A2& a2, const A3& a3, + const A4& a4) + : data(a1,a2,a3,a4) {} + + template <typename A1, typename A2, typename A3, typename A4, typename A5> + explicit FoldingSetNodeWrapper(const A1& a1, const A2& a2, const A3& a3, + const A4& a4, const A5& a5) + : data(a1,a2,a3,a4,a5) {} + + + void Profile(FoldingSetNodeID& ID) { FoldingSetTrait<T>::Profile(data, ID); } + + T& getValue() { return data; } + const T& getValue() const { return data; } + + operator T&() { return data; } + operator const T&() const { return data; } +}; + +//===----------------------------------------------------------------------===// +// Partial specializations of FoldingSetTrait. + +template<typename T> struct FoldingSetTrait<T*> { + static inline void Profile(const T* X, FoldingSetNodeID& ID) { + ID.AddPointer(X); + } + static inline void Profile(T* X, FoldingSetNodeID& ID) { + ID.AddPointer(X); + } +}; + +template<typename T> struct FoldingSetTrait<const T*> { + static inline void Profile(const T* X, FoldingSetNodeID& ID) { + ID.AddPointer(X); + } +}; + +} // End of namespace llvm. + +#endif diff --git a/include/llvm/ADT/GraphTraits.h b/include/llvm/ADT/GraphTraits.h new file mode 100644 index 0000000..2d103cf --- /dev/null +++ b/include/llvm/ADT/GraphTraits.h @@ -0,0 +1,103 @@ +//===-- llvm/ADT/GraphTraits.h - Graph traits template ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the little GraphTraits<X> template class that should be +// specialized by classes that want to be iteratable by generic graph iterators. +// +// This file also defines the marker class Inverse that is used to iterate over +// graphs in a graph defined, inverse ordering... +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_GRAPHTRAITS_H +#define LLVM_ADT_GRAPHTRAITS_H + +namespace llvm { + +// GraphTraits - This class should be specialized by different graph types... +// which is why the default version is empty. +// +template<class GraphType> +struct GraphTraits { + // Elements to provide: + + // typedef NodeType - Type of Node in the graph + // typedef ChildIteratorType - Type used to iterate over children in graph + + // static NodeType *getEntryNode(GraphType *) + // Return the entry node of the graph + + // static ChildIteratorType child_begin(NodeType *) + // static ChildIteratorType child_end (NodeType *) + // Return iterators that point to the beginning and ending of the child + // node list for the specified node. 
+ // + + + // typedef ...iterator nodes_iterator; + // static nodes_iterator nodes_begin(GraphType *G) + // static nodes_iterator nodes_end (GraphType *G) + // + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + + + // If anyone tries to use this class without having an appropriate + // specialization, make an error. If you get this error, it's because you + // need to include the appropriate specialization of GraphTraits<> for your + // graph, or you need to define it for a new graph type. Either that or + // your argument to XXX_begin(...) is unknown or needs to have the proper .h + // file #include'd. + // + typedef typename GraphType::UnknownGraphTypeError NodeType; +}; + + +// Inverse - This class is used as a little marker class to tell the graph +// iterator to iterate over the graph in a graph defined "Inverse" ordering. +// Not all graphs define an inverse ordering, and if they do, it depends on +// the graph exactly what that is. Here's an example of usage with the +// df_iterator: +// +// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M); +// for (; I != E; ++I) { ... } +// +// Which is equivalent to: +// df_iterator<Inverse<Method*> > I = idf_begin(M), E = idf_end(M); +// for (; I != E; ++I) { ... } +// +template <class GraphType> +struct Inverse { + const GraphType &Graph; + + inline Inverse(const GraphType &G) : Graph(G) {} +}; + +// Provide a partial specialization of GraphTraits so that the inverse of an +// inverse falls back to the original graph. +template<class T> +struct GraphTraits<Inverse<Inverse<T> > > { + typedef typename GraphTraits<T>::NodeType NodeType; + typedef typename GraphTraits<T>::ChildIteratorType ChildIteratorType; + + static NodeType *getEntryNode(Inverse<Inverse<T> > *G) { + return GraphTraits<T>::getEntryNode(G->Graph.Graph); + } + + static ChildIteratorType child_begin(NodeType* N) { + return GraphTraits<T>::child_begin(N); + } + + static ChildIteratorType child_end(NodeType* N) { + return GraphTraits<T>::child_end(N); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/HashExtras.h b/include/llvm/ADT/HashExtras.h new file mode 100644 index 0000000..20c4fd3 --- /dev/null +++ b/include/llvm/ADT/HashExtras.h @@ -0,0 +1,40 @@ +//===-- llvm/ADT/HashExtras.h - Useful functions for STL hash ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains some templates that are useful if you are working with the +// STL Hashed containers. +// +// No library is required when using these functions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_HASHEXTRAS_H +#define LLVM_ADT_HASHEXTRAS_H + +#include <string> + +// Cannot specialize hash template from outside of the std namespace. +namespace HASH_NAMESPACE { + +// Provide a hash function for arbitrary pointers... 
+template <class T> struct hash<T *> { + inline size_t operator()(const T *Val) const { + return reinterpret_cast<size_t>(Val); + } +}; + +template <> struct hash<std::string> { + size_t operator()(std::string const &str) const { + return hash<char const *>()(str.c_str()); + } +}; + +} // End namespace std + +#endif diff --git a/include/llvm/ADT/ImmutableList.h b/include/llvm/ADT/ImmutableList.h new file mode 100644 index 0000000..a7f5819 --- /dev/null +++ b/include/llvm/ADT/ImmutableList.h @@ -0,0 +1,219 @@ +//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ImmutableList class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_IMLIST_H +#define LLVM_ADT_IMLIST_H + +#include "llvm/Support/Allocator.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/Support/DataTypes.h" +#include <cassert> + +namespace llvm { + +template <typename T> class ImmutableListFactory; + +template <typename T> +class ImmutableListImpl : public FoldingSetNode { + T Head; + const ImmutableListImpl* Tail; + + ImmutableListImpl(const T& head, const ImmutableListImpl* tail = 0) + : Head(head), Tail(tail) {} + + friend class ImmutableListFactory<T>; + + // Do not implement. + void operator=(const ImmutableListImpl&); + ImmutableListImpl(const ImmutableListImpl&); + +public: + const T& getHead() const { return Head; } + const ImmutableListImpl* getTail() const { return Tail; } + + static inline void Profile(FoldingSetNodeID& ID, const T& H, + const ImmutableListImpl* L){ + ID.AddPointer(L); + ID.Add(H); + } + + void Profile(FoldingSetNodeID& ID) { + Profile(ID, Head, Tail); + } +}; + +/// ImmutableList - This class represents an immutable (functional) list. +/// It is implemented as a smart pointer (wraps ImmutableListImpl), so it +/// it is intended to always be copied by value as if it were a pointer. +/// This interface matches ImmutableSet and ImmutableMap. ImmutableList +/// objects should almost never be created directly, and instead should +/// be created by ImmutableListFactory objects that manage the lifetime +/// of a group of lists. When the factory object is reclaimed, all lists +/// created by that factory are released as well. +template <typename T> +class ImmutableList { +public: + typedef T value_type; + typedef ImmutableListFactory<T> Factory; + +private: + const ImmutableListImpl<T>* X; + +public: + // This constructor should normally only be called by ImmutableListFactory<T>. + // There may be cases, however, when one needs to extract the internal pointer + // and reconstruct a list object from that pointer. 
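+  // For example (sketch), a list can be round-tripped through its
+  // implementation pointer when it must be stored as a raw pointer:
+  //
+  //   const ImmutableListImpl<T> *P = L.getInternalPointer();
+  //   ImmutableList<T> L2(P);   // L2 refers to the same (uniqued) list as L.
+  //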
+ ImmutableList(const ImmutableListImpl<T>* x = 0) : X(x) {} + + const ImmutableListImpl<T>* getInternalPointer() const { + return X; + } + + class iterator { + const ImmutableListImpl<T>* L; + public: + iterator() : L(0) {} + iterator(ImmutableList l) : L(l.getInternalPointer()) {} + + iterator& operator++() { L = L->getTail(); return *this; } + bool operator==(const iterator& I) const { return L == I.L; } + bool operator!=(const iterator& I) const { return L != I.L; } + const value_type& operator*() const { return L->getHead(); } + ImmutableList getList() const { return L; } + }; + + /// begin - Returns an iterator referring to the head of the list, or + /// an iterator denoting the end of the list if the list is empty. + iterator begin() const { return iterator(X); } + + /// end - Returns an iterator denoting the end of the list. This iterator + /// does not refer to a valid list element. + iterator end() const { return iterator(); } + + /// isEmpty - Returns true if the list is empty. + bool isEmpty() const { return !X; } + + /// isEqual - Returns true if two lists are equal. Because all lists created + /// from the same ImmutableListFactory are uniqued, this has O(1) complexity + /// because it the contents of the list do not need to be compared. Note + /// that you should only compare two lists created from the same + /// ImmutableListFactory. + bool isEqual(const ImmutableList& L) const { return X == L.X; } + + bool operator==(const ImmutableList& L) const { return isEqual(L); } + + /// getHead - Returns the head of the list. + const T& getHead() { + assert (!isEmpty() && "Cannot get the head of an empty list."); + return X->getHead(); + } + + /// getTail - Returns the tail of the list, which is another (possibly empty) + /// ImmutableList. + ImmutableList getTail() { + return X ? X->getTail() : 0; + } + + void Profile(FoldingSetNodeID& ID) const { + ID.AddPointer(X); + } +}; + +template <typename T> +class ImmutableListFactory { + typedef ImmutableListImpl<T> ListTy; + typedef FoldingSet<ListTy> CacheTy; + + CacheTy Cache; + uintptr_t Allocator; + + bool ownsAllocator() const { + return Allocator & 0x1 ? false : true; + } + + BumpPtrAllocator& getAllocator() const { + return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1); + } + +public: + ImmutableListFactory() + : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {} + + ImmutableListFactory(BumpPtrAllocator& Alloc) + : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {} + + ~ImmutableListFactory() { + if (ownsAllocator()) delete &getAllocator(); + } + + ImmutableList<T> Concat(const T& Head, ImmutableList<T> Tail) { + // Profile the new list to see if it already exists in our cache. + FoldingSetNodeID ID; + void* InsertPos; + + const ListTy* TailImpl = Tail.getInternalPointer(); + ListTy::Profile(ID, Head, TailImpl); + ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos); + + if (!L) { + // The list does not exist in our cache. Create it. + BumpPtrAllocator& A = getAllocator(); + L = (ListTy*) A.Allocate<ListTy>(); + new (L) ListTy(Head, TailImpl); + + // Insert the new list into the cache. + Cache.InsertNode(L, InsertPos); + } + + return L; + } + + ImmutableList<T> Add(const T& D, ImmutableList<T> L) { + return Concat(D, L); + } + + ImmutableList<T> GetEmptyList() const { + return ImmutableList<T>(0); + } + + ImmutableList<T> Create(const T& X) { + return Concat(X, GetEmptyList()); + } +}; + +//===----------------------------------------------------------------------===// +// Partially-specialized Traits. 
+//===----------------------------------------------------------------------===// + +template<typename T> struct DenseMapInfo; +template<typename T> struct DenseMapInfo<ImmutableList<T> > { + static inline ImmutableList<T> getEmptyKey() { + return reinterpret_cast<ImmutableListImpl<T>*>(-1); + } + static inline ImmutableList<T> getTombstoneKey() { + return reinterpret_cast<ImmutableListImpl<T>*>(-2); + } + static unsigned getHashValue(ImmutableList<T> X) { + uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer()); + return (unsigned((uintptr_t)PtrVal) >> 4) ^ + (unsigned((uintptr_t)PtrVal) >> 9); + } + static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) { + return X1 == X2; + } + static bool isPod() { return true; } +}; + +} // end llvm namespace + +#endif diff --git a/include/llvm/ADT/ImmutableMap.h b/include/llvm/ADT/ImmutableMap.h new file mode 100644 index 0000000..52708bc --- /dev/null +++ b/include/llvm/ADT/ImmutableMap.h @@ -0,0 +1,230 @@ +//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ImmutableMap class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_IMMAP_H +#define LLVM_ADT_IMMAP_H + +#include "llvm/ADT/ImmutableSet.h" + +namespace llvm { + +/// ImutKeyValueInfo -Traits class used by ImmutableMap. While both the first +/// and second elements in a pair are used to generate profile information, +/// only the first element (the key) is used by isEqual and isLess. +template <typename T, typename S> +struct ImutKeyValueInfo { + typedef const std::pair<T,S> value_type; + typedef const value_type& value_type_ref; + typedef const T key_type; + typedef const T& key_type_ref; + typedef const S data_type; + typedef const S& data_type_ref; + + static inline key_type_ref KeyOfValue(value_type_ref V) { + return V.first; + } + + static inline data_type_ref DataOfValue(value_type_ref V) { + return V.second; + } + + static inline bool isEqual(key_type_ref L, key_type_ref R) { + return ImutContainerInfo<T>::isEqual(L,R); + } + static inline bool isLess(key_type_ref L, key_type_ref R) { + return ImutContainerInfo<T>::isLess(L,R); + } + + static inline bool isDataEqual(data_type_ref L, data_type_ref R) { + return ImutContainerInfo<S>::isEqual(L,R); + } + + static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) { + ImutContainerInfo<T>::Profile(ID, V.first); + ImutContainerInfo<S>::Profile(ID, V.second); + } +}; + + +template <typename KeyT, typename ValT, + typename ValInfo = ImutKeyValueInfo<KeyT,ValT> > +class ImmutableMap { +public: + typedef typename ValInfo::value_type value_type; + typedef typename ValInfo::value_type_ref value_type_ref; + typedef typename ValInfo::key_type key_type; + typedef typename ValInfo::key_type_ref key_type_ref; + typedef typename ValInfo::data_type data_type; + typedef typename ValInfo::data_type_ref data_type_ref; + typedef ImutAVLTree<ValInfo> TreeTy; + +private: + TreeTy* Root; + +public: + /// Constructs a map from a pointer to a tree root. In general one + /// should use a Factory object to create maps instead of directly + /// invoking the constructor, but there are cases where make this + /// constructor public is useful. 
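+  /// A typical usage sketch (illustrative only; the element types are
+  /// arbitrary choices, not requirements):
+  ///
+  ///   ImmutableMap<int, int>::Factory F;
+  ///   ImmutableMap<int, int> M = F.GetEmptyMap();
+  ///   M = F.Add(M, 1, 10);            // Add returns a new map; M is rebound.
+  ///   if (const int *D = M.lookup(1))
+  ///     ... use *D (== 10) ...
+  ///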
+ explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {} + + class Factory { + typename TreeTy::Factory F; + + public: + Factory() {} + + Factory(BumpPtrAllocator& Alloc) + : F(Alloc) {} + + ImmutableMap GetEmptyMap() { return ImmutableMap(F.GetEmptyTree()); } + + ImmutableMap Add(ImmutableMap Old, key_type_ref K, data_type_ref D) { + return ImmutableMap(F.Add(Old.Root, + std::make_pair<key_type,data_type>(K,D))); + } + + ImmutableMap Remove(ImmutableMap Old, key_type_ref K) { + return ImmutableMap(F.Remove(Old.Root,K)); + } + + private: + Factory(const Factory& RHS) {}; + void operator=(const Factory& RHS) {}; + }; + + friend class Factory; + + bool contains(key_type_ref K) const { + return Root ? Root->contains(K) : false; + } + + + bool operator==(ImmutableMap RHS) const { + return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; + } + + bool operator!=(ImmutableMap RHS) const { + return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; + } + + TreeTy* getRoot() const { return Root; } + + bool isEmpty() const { return !Root; } + + //===--------------------------------------------------===// + // Foreach - A limited form of map iteration. + //===--------------------------------------------------===// + +private: + template <typename Callback> + struct CBWrapper { + Callback C; + void operator()(value_type_ref V) { C(V.first,V.second); } + }; + + template <typename Callback> + struct CBWrapperRef { + Callback &C; + CBWrapperRef(Callback& c) : C(c) {} + + void operator()(value_type_ref V) { C(V.first,V.second); } + }; + +public: + template <typename Callback> + void foreach(Callback& C) { + if (Root) { + CBWrapperRef<Callback> CB(C); + Root->foreach(CB); + } + } + + template <typename Callback> + void foreach() { + if (Root) { + CBWrapper<Callback> CB; + Root->foreach(CB); + } + } + + //===--------------------------------------------------===// + // For testing. + //===--------------------------------------------------===// + + void verify() const { if (Root) Root->verify(); } + + //===--------------------------------------------------===// + // Iterators. + //===--------------------------------------------------===// + + class iterator { + typename TreeTy::iterator itr; + + iterator() {} + iterator(TreeTy* t) : itr(t) {} + friend class ImmutableMap; + + public: + value_type_ref operator*() const { return itr->getValue(); } + value_type* operator->() const { return &itr->getValue(); } + + key_type_ref getKey() const { return itr->getValue().first; } + data_type_ref getData() const { return itr->getValue().second; } + + + iterator& operator++() { ++itr; return *this; } + iterator operator++(int) { iterator tmp(*this); ++itr; return tmp; } + iterator& operator--() { --itr; return *this; } + iterator operator--(int) { iterator tmp(*this); --itr; return tmp; } + bool operator==(const iterator& RHS) const { return RHS.itr == itr; } + bool operator!=(const iterator& RHS) const { return RHS.itr != itr; } + }; + + iterator begin() const { return iterator(Root); } + iterator end() const { return iterator(); } + + data_type* lookup(key_type_ref K) const { + if (Root) { + TreeTy* T = Root->find(K); + if (T) return &T->getValue().second; + } + + return 0; + } + + /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for + /// which key is the highest in the ordering of keys in the map. This + /// method returns NULL if the map is empty. + value_type* getMaxElement() const { + return Root ? 
&(Root->getMaxElement()->getValue()) : 0; + } + + //===--------------------------------------------------===// + // Utility methods. + //===--------------------------------------------------===// + + inline unsigned getHeight() const { return Root ? Root->getHeight() : 0; } + + static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) { + ID.AddPointer(M.Root); + } + + inline void Profile(FoldingSetNodeID& ID) const { + return Profile(ID,*this); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h new file mode 100644 index 0000000..be274db --- /dev/null +++ b/include/llvm/ADT/ImmutableSet.h @@ -0,0 +1,1070 @@ +//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ImutAVLTree and ImmutableSet classes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_IMSET_H +#define LLVM_ADT_IMSET_H + +#include "llvm/Support/Allocator.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/Support/DataTypes.h" +#include <cassert> +#include <functional> + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Immutable AVL-Tree Definition. +//===----------------------------------------------------------------------===// + +template <typename ImutInfo> class ImutAVLFactory; +template <typename ImutInfo> class ImutAVLTreeInOrderIterator; +template <typename ImutInfo> class ImutAVLTreeGenericIterator; + +template <typename ImutInfo > +class ImutAVLTree : public FoldingSetNode { +public: + typedef typename ImutInfo::key_type_ref key_type_ref; + typedef typename ImutInfo::value_type value_type; + typedef typename ImutInfo::value_type_ref value_type_ref; + + typedef ImutAVLFactory<ImutInfo> Factory; + friend class ImutAVLFactory<ImutInfo>; + + friend class ImutAVLTreeGenericIterator<ImutInfo>; + friend class FoldingSet<ImutAVLTree>; + + typedef ImutAVLTreeInOrderIterator<ImutInfo> iterator; + + //===----------------------------------------------------===// + // Public Interface. + //===----------------------------------------------------===// + + /// getLeft - Returns a pointer to the left subtree. This value + /// is NULL if there is no left subtree. + ImutAVLTree* getLeft() const { + assert (!isMutable() && "Node is incorrectly marked mutable."); + + return reinterpret_cast<ImutAVLTree*>(Left); + } + + /// getRight - Returns a pointer to the right subtree. This value is + /// NULL if there is no right subtree. + ImutAVLTree* getRight() const { return Right; } + + /// getHeight - Returns the height of the tree. A tree with no subtrees + /// has a height of 1. + unsigned getHeight() const { return Height; } + + /// getValue - Returns the data value associated with the tree node. + const value_type& getValue() const { return Value; } + + /// find - Finds the subtree associated with the specified key value. + /// This method returns NULL if no matching subtree is found. 
+  ImutAVLTree* find(key_type_ref K) {
+    ImutAVLTree *T = this;
+
+    while (T) {
+      key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
+
+      if (ImutInfo::isEqual(K,CurrentKey))
+        return T;
+      else if (ImutInfo::isLess(K,CurrentKey))
+        T = T->getLeft();
+      else
+        T = T->getRight();
+    }
+
+    return NULL;
+  }
+
+  /// getMaxElement - Find the subtree associated with the highest ranked
+  ///  key value.
+  ImutAVLTree* getMaxElement() {
+    ImutAVLTree *T = this;
+    ImutAVLTree *Right = T->getRight();
+    while (Right) { T = Right; Right = T->getRight(); }
+    return T;
+  }
+
+  /// size - Returns the number of nodes in the tree, which includes
+  ///  both leaves and non-leaf nodes.
+  unsigned size() const {
+    unsigned n = 1;
+
+    if (const ImutAVLTree* L = getLeft())  n += L->size();
+    if (const ImutAVLTree* R = getRight()) n += R->size();
+
+    return n;
+  }
+
+  /// begin - Returns an iterator that iterates over the nodes of the tree
+  ///  in an inorder traversal.  The returned iterator thus refers to
+  ///  the tree node with the minimum data element.
+  iterator begin() const { return iterator(this); }
+
+  /// end - Returns an iterator for the tree that denotes the end of an
+  ///  inorder traversal.
+  iterator end() const { return iterator(); }
+
+  bool ElementEqual(value_type_ref V) const {
+    // Compare the keys.
+    if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
+                           ImutInfo::KeyOfValue(V)))
+      return false;
+
+    // Also compare the data values.
+    if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
+                               ImutInfo::DataOfValue(V)))
+      return false;
+
+    return true;
+  }
+
+  bool ElementEqual(const ImutAVLTree* RHS) const {
+    return ElementEqual(RHS->getValue());
+  }
+
+  /// isEqual - Compares two trees for structural equality and returns true
+  ///  if they are equal.  The worst-case performance of this operation is
+  ///  linear in the sizes of the trees.
+  bool isEqual(const ImutAVLTree& RHS) const {
+    if (&RHS == this)
+      return true;
+
+    iterator LItr = begin(), LEnd = end();
+    iterator RItr = RHS.begin(), REnd = RHS.end();
+
+    while (LItr != LEnd && RItr != REnd) {
+      if (*LItr == *RItr) {
+        LItr.SkipSubTree();
+        RItr.SkipSubTree();
+        continue;
+      }
+
+      if (!LItr->ElementEqual(*RItr))
+        return false;
+
+      ++LItr;
+      ++RItr;
+    }
+
+    return LItr == LEnd && RItr == REnd;
+  }
+
+  /// isNotEqual - Compares two trees for structural inequality.  Performance
+  ///  is the same as isEqual.
+  bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }
+
+  /// contains - Returns true if this tree contains a subtree (node) that
+  ///  has a data element that matches the specified key.  Complexity
+  ///  is logarithmic in the size of the tree.
+  bool contains(const key_type_ref K) { return (bool) find(K); }
+
+  /// foreach - A member template that invokes operator() on a functor
+  ///  object (specified by Callback) for every node/subtree in the tree.
+  ///  Nodes are visited using an inorder traversal.
+  template <typename Callback>
+  void foreach(Callback& C) {
+    if (ImutAVLTree* L = getLeft()) L->foreach(C);
+
+    C(Value);
+
+    if (ImutAVLTree* R = getRight()) R->foreach(C);
+  }
+
+  /// verify - A utility method that checks that the balancing and
+  ///  ordering invariants of the tree are satisfied.  It is a recursive
+  ///  method that returns the height of the tree, which is then consumed
+  ///  by the enclosing verify call.  External callers should ignore the
+  ///  return value.  An invalid tree will cause an assertion to fire in
+  ///  a debug build.
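+  ///
+  /// A hedged usage sketch ('T' is a placeholder for any tree pointer):
+  ///
+  ///   #ifndef NDEBUG
+  ///   if (T) T->verify();  // fires an assertion if an invariant is broken
+  ///   #endif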
+  unsigned verify() const {
+    unsigned HL = getLeft() ? getLeft()->verify() : 0;
+    unsigned HR = getRight() ? getRight()->verify() : 0;
+
+    assert (getHeight() == ( HL > HR ? HL : HR ) + 1
+            && "Height calculation wrong.");
+
+    assert ((HL > HR ? HL-HR : HR-HL) <= 2
+            && "Balancing invariant violated.");
+
+    assert ((!getLeft()
+             || ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+                                 ImutInfo::KeyOfValue(getValue())))
+            && "Value in left child is not less than current value.");
+
+    assert ((!getRight()
+             || ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+                                 ImutInfo::KeyOfValue(getRight()->getValue())))
+            && "Current value is not less than value of right child.");
+
+    return getHeight();
+  }
+
+  /// Profile - Profiling for ImutAVLTree.
+  void Profile(llvm::FoldingSetNodeID& ID) {
+    ID.AddInteger(ComputeDigest());
+  }
+
+  //===----------------------------------------------------===//
+  // Internal Values.
+  //===----------------------------------------------------===//
+
+private:
+  uintptr_t        Left;
+  ImutAVLTree*     Right;
+  unsigned         Height;
+  value_type       Value;
+  unsigned         Digest;
+
+  //===----------------------------------------------------===//
+  // Internal methods (node manipulation; used by Factory).
+  //===----------------------------------------------------===//
+
+private:
+
+  enum { Mutable = 0x1 };
+
+  /// ImutAVLTree - Internal constructor that is only called by
+  ///  ImutAVLFactory.
+  ImutAVLTree(ImutAVLTree* l, ImutAVLTree* r, value_type_ref v, unsigned height)
+    : Left(reinterpret_cast<uintptr_t>(l) | Mutable),
+      Right(r), Height(height), Value(v), Digest(0) {}
+
+  /// isMutable - Returns true if the left and right subtree references
+  ///  (as well as height) can be changed.  If this method returns false,
+  ///  the tree is truly immutable.  Trees returned from an ImutAVLFactory
+  ///  object should always have this method return true.  Further, if this
+  ///  method returns false for an instance of ImutAVLTree, all subtrees
+  ///  will also have this method return false.  The converse is not true.
+  bool isMutable() const { return Left & Mutable; }
+
+  /// getSafeLeft - Returns the pointer to the left tree by always masking
+  ///  out the mutable bit.  This is used internally by ImutAVLFactory,
+  ///  as no trees returned to the client should have the mutable flag set.
+  ImutAVLTree* getSafeLeft() const {
+    return reinterpret_cast<ImutAVLTree*>(Left & ~Mutable);
+  }
+
+  //===----------------------------------------------------===//
+  // Mutating operations.  A tree root can be manipulated as
+  // long as its reference has not "escaped" from internal
+  // methods of a factory object (see below).  When a tree
+  // pointer is externally viewable by client code, the
+  // internal "mutable bit" is cleared to mark the tree
+  // immutable.  Note that a tree that still has its mutable
+  // bit set may have children (subtrees) that are themselves
+  // immutable.
+  //===----------------------------------------------------===//
+
+  /// MarkImmutable - Clears the mutable flag for a tree.  After this happens,
+  ///  it is an error to call setLeft(), setRight(), and setHeight().  It
+  ///  is also then safe to call getLeft() instead of getSafeLeft().
+  void MarkImmutable() {
+    assert (isMutable() && "Mutable flag already removed.");
+    Left &= ~Mutable;
+  }
+
+  /// setLeft - Changes the reference of the left subtree.  Used internally
+  ///  by ImutAVLFactory.
+ void setLeft(ImutAVLTree* NewLeft) { + assert (isMutable() && + "Only a mutable tree can have its left subtree changed."); + + Left = reinterpret_cast<uintptr_t>(NewLeft) | Mutable; + } + + /// setRight - Changes the reference of the right subtree. Used internally + /// by ImutAVLFactory. + void setRight(ImutAVLTree* NewRight) { + assert (isMutable() && + "Only a mutable tree can have its right subtree changed."); + + Right = NewRight; + } + + /// setHeight - Changes the height of the tree. Used internally by + /// ImutAVLFactory. + void setHeight(unsigned h) { + assert (isMutable() && "Only a mutable tree can have its height changed."); + Height = h; + } + + + static inline + unsigned ComputeDigest(ImutAVLTree* L, ImutAVLTree* R, value_type_ref V) { + unsigned digest = 0; + + if (L) digest += L->ComputeDigest(); + + { // Compute digest of stored data. + FoldingSetNodeID ID; + ImutInfo::Profile(ID,V); + digest += ID.ComputeHash(); + } + + if (R) digest += R->ComputeDigest(); + + return digest; + } + + inline unsigned ComputeDigest() { + if (Digest) return Digest; + + unsigned X = ComputeDigest(getSafeLeft(), getRight(), getValue()); + if (!isMutable()) Digest = X; + + return X; + } +}; + +//===----------------------------------------------------------------------===// +// Immutable AVL-Tree Factory class. +//===----------------------------------------------------------------------===// + +template <typename ImutInfo > +class ImutAVLFactory { + typedef ImutAVLTree<ImutInfo> TreeTy; + typedef typename TreeTy::value_type_ref value_type_ref; + typedef typename TreeTy::key_type_ref key_type_ref; + + typedef FoldingSet<TreeTy> CacheTy; + + CacheTy Cache; + uintptr_t Allocator; + + bool ownsAllocator() const { + return Allocator & 0x1 ? false : true; + } + + BumpPtrAllocator& getAllocator() const { + return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1); + } + + //===--------------------------------------------------===// + // Public interface. + //===--------------------------------------------------===// + +public: + ImutAVLFactory() + : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {} + + ImutAVLFactory(BumpPtrAllocator& Alloc) + : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {} + + ~ImutAVLFactory() { + if (ownsAllocator()) delete &getAllocator(); + } + + TreeTy* Add(TreeTy* T, value_type_ref V) { + T = Add_internal(V,T); + MarkImmutable(T); + return T; + } + + TreeTy* Remove(TreeTy* T, key_type_ref V) { + T = Remove_internal(V,T); + MarkImmutable(T); + return T; + } + + TreeTy* GetEmptyTree() const { return NULL; } + + //===--------------------------------------------------===// + // A bunch of quick helper functions used for reasoning + // about the properties of trees and their children. + // These have succinct names so that the balancing code + // is as terse (and readable) as possible. + //===--------------------------------------------------===// +private: + + bool isEmpty(TreeTy* T) const { return !T; } + unsigned Height(TreeTy* T) const { return T ? T->getHeight() : 0; } + TreeTy* Left(TreeTy* T) const { return T->getSafeLeft(); } + TreeTy* Right(TreeTy* T) const { return T->getRight(); } + value_type_ref Value(TreeTy* T) const { return T->Value; } + + unsigned IncrementHeight(TreeTy* L, TreeTy* R) const { + unsigned hl = Height(L); + unsigned hr = Height(R); + return ( hl > hr ? 
hl : hr ) + 1; + } + + + static bool CompareTreeWithSection(TreeTy* T, + typename TreeTy::iterator& TI, + typename TreeTy::iterator& TE) { + + typename TreeTy::iterator I = T->begin(), E = T->end(); + + for ( ; I!=E ; ++I, ++TI) + if (TI == TE || !I->ElementEqual(*TI)) + return false; + + return true; + } + + //===--------------------------------------------------===// + // "CreateNode" is used to generate new tree roots that link + // to other trees. The functon may also simply move links + // in an existing root if that root is still marked mutable. + // This is necessary because otherwise our balancing code + // would leak memory as it would create nodes that are + // then discarded later before the finished tree is + // returned to the caller. + //===--------------------------------------------------===// + + TreeTy* CreateNode(TreeTy* L, value_type_ref V, TreeTy* R) { + // Search the FoldingSet bucket for a Tree with the same digest. + FoldingSetNodeID ID; + unsigned digest = TreeTy::ComputeDigest(L, R, V); + ID.AddInteger(digest); + unsigned hash = ID.ComputeHash(); + + typename CacheTy::bucket_iterator I = Cache.bucket_begin(hash); + typename CacheTy::bucket_iterator E = Cache.bucket_end(hash); + + for (; I != E; ++I) { + TreeTy* T = &*I; + + if (T->ComputeDigest() != digest) + continue; + + // We found a collision. Perform a comparison of Contents('T') + // with Contents('L')+'V'+Contents('R'). + + typename TreeTy::iterator TI = T->begin(), TE = T->end(); + + // First compare Contents('L') with the (initial) contents of T. + if (!CompareTreeWithSection(L, TI, TE)) + continue; + + // Now compare the new data element. + if (TI == TE || !TI->ElementEqual(V)) + continue; + + ++TI; + + // Now compare the remainder of 'T' with 'R'. + if (!CompareTreeWithSection(R, TI, TE)) + continue; + + if (TI != TE) // Contents('R') did not match suffix of 'T'. + continue; + + // Trees did match! Return 'T'. + return T; + } + + // No tree with the contents: Contents('L')+'V'+Contents('R'). + // Create it. + + // Allocate the new tree node and insert it into the cache. + BumpPtrAllocator& A = getAllocator(); + TreeTy* T = (TreeTy*) A.Allocate<TreeTy>(); + new (T) TreeTy(L,R,V,IncrementHeight(L,R)); + + // We do not insert 'T' into the FoldingSet here. This is because + // this tree is still mutable and things may get rebalanced. + // Because our digest is associative and based on the contents of + // the set, this should hopefully not cause any strange bugs. + // 'T' is inserted by 'MarkImmutable'. + + return T; + } + + TreeTy* CreateNode(TreeTy* L, TreeTy* OldTree, TreeTy* R) { + assert (!isEmpty(OldTree)); + + if (OldTree->isMutable()) { + OldTree->setLeft(L); + OldTree->setRight(R); + OldTree->setHeight(IncrementHeight(L,R)); + return OldTree; + } + else return CreateNode(L, Value(OldTree), R); + } + + /// Balance - Used by Add_internal and Remove_internal to + /// balance a newly created tree. 
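+  ///
+  /// Illustrative shape of the left-heavy case (heights are examples only):
+  /// when Height(L) > Height(R) + 2 and Height(LL) >= Height(LR), the new
+  /// root reuses L's value with LL on the left and a node built from
+  /// (LR, V, R) on the right (a single rotation); otherwise the rebuild
+  /// goes through LR, which is the double-rotation branch below.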
+ TreeTy* Balance(TreeTy* L, value_type_ref V, TreeTy* R) { + + unsigned hl = Height(L); + unsigned hr = Height(R); + + if (hl > hr + 2) { + assert (!isEmpty(L) && + "Left tree cannot be empty to have a height >= 2."); + + TreeTy* LL = Left(L); + TreeTy* LR = Right(L); + + if (Height(LL) >= Height(LR)) + return CreateNode(LL, L, CreateNode(LR,V,R)); + + assert (!isEmpty(LR) && + "LR cannot be empty because it has a height >= 1."); + + TreeTy* LRL = Left(LR); + TreeTy* LRR = Right(LR); + + return CreateNode(CreateNode(LL,L,LRL), LR, CreateNode(LRR,V,R)); + } + else if (hr > hl + 2) { + assert (!isEmpty(R) && + "Right tree cannot be empty to have a height >= 2."); + + TreeTy* RL = Left(R); + TreeTy* RR = Right(R); + + if (Height(RR) >= Height(RL)) + return CreateNode(CreateNode(L,V,RL), R, RR); + + assert (!isEmpty(RL) && + "RL cannot be empty because it has a height >= 1."); + + TreeTy* RLL = Left(RL); + TreeTy* RLR = Right(RL); + + return CreateNode(CreateNode(L,V,RLL), RL, CreateNode(RLR,R,RR)); + } + else + return CreateNode(L,V,R); + } + + /// Add_internal - Creates a new tree that includes the specified + /// data and the data from the original tree. If the original tree + /// already contained the data item, the original tree is returned. + TreeTy* Add_internal(value_type_ref V, TreeTy* T) { + if (isEmpty(T)) + return CreateNode(T, V, T); + + assert (!T->isMutable()); + + key_type_ref K = ImutInfo::KeyOfValue(V); + key_type_ref KCurrent = ImutInfo::KeyOfValue(Value(T)); + + if (ImutInfo::isEqual(K,KCurrent)) + return CreateNode(Left(T), V, Right(T)); + else if (ImutInfo::isLess(K,KCurrent)) + return Balance(Add_internal(V,Left(T)), Value(T), Right(T)); + else + return Balance(Left(T), Value(T), Add_internal(V,Right(T))); + } + + /// Remove_internal - Creates a new tree that includes all the data + /// from the original tree except the specified data. If the + /// specified data did not exist in the original tree, the original + /// tree is returned. + TreeTy* Remove_internal(key_type_ref K, TreeTy* T) { + if (isEmpty(T)) + return T; + + assert (!T->isMutable()); + + key_type_ref KCurrent = ImutInfo::KeyOfValue(Value(T)); + + if (ImutInfo::isEqual(K,KCurrent)) + return CombineLeftRightTrees(Left(T),Right(T)); + else if (ImutInfo::isLess(K,KCurrent)) + return Balance(Remove_internal(K,Left(T)), Value(T), Right(T)); + else + return Balance(Left(T), Value(T), Remove_internal(K,Right(T))); + } + + TreeTy* CombineLeftRightTrees(TreeTy* L, TreeTy* R) { + if (isEmpty(L)) return R; + if (isEmpty(R)) return L; + + TreeTy* OldNode; + TreeTy* NewRight = RemoveMinBinding(R,OldNode); + return Balance(L,Value(OldNode),NewRight); + } + + TreeTy* RemoveMinBinding(TreeTy* T, TreeTy*& NodeRemoved) { + assert (!isEmpty(T)); + + if (isEmpty(Left(T))) { + NodeRemoved = T; + return Right(T); + } + + return Balance(RemoveMinBinding(Left(T),NodeRemoved),Value(T),Right(T)); + } + + /// MarkImmutable - Clears the mutable bits of a root and all of its + /// descendants. + void MarkImmutable(TreeTy* T) { + if (!T || !T->isMutable()) + return; + + T->MarkImmutable(); + MarkImmutable(Left(T)); + MarkImmutable(Right(T)); + + // Now that the node is immutable it can safely be inserted + // into the node cache. + llvm::FoldingSetNodeID ID; + ID.AddInteger(T->ComputeDigest()); + Cache.InsertNode(T, (void*) &*Cache.bucket_end(ID.ComputeHash())); + } +}; + + +//===----------------------------------------------------------------------===// +// Immutable AVL-Tree Iterators. 
+//===----------------------------------------------------------------------===// + +template <typename ImutInfo> +class ImutAVLTreeGenericIterator { + SmallVector<uintptr_t,20> stack; +public: + enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3, + Flags=0x3 }; + + typedef ImutAVLTree<ImutInfo> TreeTy; + typedef ImutAVLTreeGenericIterator<ImutInfo> _Self; + + inline ImutAVLTreeGenericIterator() {} + inline ImutAVLTreeGenericIterator(const TreeTy* Root) { + if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root)); + } + + TreeTy* operator*() const { + assert (!stack.empty()); + return reinterpret_cast<TreeTy*>(stack.back() & ~Flags); + } + + uintptr_t getVisitState() { + assert (!stack.empty()); + return stack.back() & Flags; + } + + + bool AtEnd() const { return stack.empty(); } + + bool AtBeginning() const { + return stack.size() == 1 && getVisitState() == VisitedNone; + } + + void SkipToParent() { + assert (!stack.empty()); + stack.pop_back(); + + if (stack.empty()) + return; + + switch (getVisitState()) { + case VisitedNone: + stack.back() |= VisitedLeft; + break; + case VisitedLeft: + stack.back() |= VisitedRight; + break; + default: + assert (false && "Unreachable."); + } + } + + inline bool operator==(const _Self& x) const { + if (stack.size() != x.stack.size()) + return false; + + for (unsigned i = 0 ; i < stack.size(); i++) + if (stack[i] != x.stack[i]) + return false; + + return true; + } + + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + _Self& operator++() { + assert (!stack.empty()); + + TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags); + assert (Current); + + switch (getVisitState()) { + case VisitedNone: + if (TreeTy* L = Current->getSafeLeft()) + stack.push_back(reinterpret_cast<uintptr_t>(L)); + else + stack.back() |= VisitedLeft; + + break; + + case VisitedLeft: + if (TreeTy* R = Current->getRight()) + stack.push_back(reinterpret_cast<uintptr_t>(R)); + else + stack.back() |= VisitedRight; + + break; + + case VisitedRight: + SkipToParent(); + break; + + default: + assert (false && "Unreachable."); + } + + return *this; + } + + _Self& operator--() { + assert (!stack.empty()); + + TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags); + assert (Current); + + switch (getVisitState()) { + case VisitedNone: + stack.pop_back(); + break; + + case VisitedLeft: + stack.back() &= ~Flags; // Set state to "VisitedNone." + + if (TreeTy* L = Current->getLeft()) + stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight); + + break; + + case VisitedRight: + stack.back() &= ~Flags; + stack.back() |= VisitedLeft; + + if (TreeTy* R = Current->getRight()) + stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight); + + break; + + default: + assert (false && "Unreachable."); + } + + return *this; + } +}; + +template <typename ImutInfo> +class ImutAVLTreeInOrderIterator { + typedef ImutAVLTreeGenericIterator<ImutInfo> InternalIteratorTy; + InternalIteratorTy InternalItr; + +public: + typedef ImutAVLTree<ImutInfo> TreeTy; + typedef ImutAVLTreeInOrderIterator<ImutInfo> _Self; + + ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) { + if (Root) operator++(); // Advance to first element. 
+ } + + ImutAVLTreeInOrderIterator() : InternalItr() {} + + inline bool operator==(const _Self& x) const { + return InternalItr == x.InternalItr; + } + + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline TreeTy* operator*() const { return *InternalItr; } + inline TreeTy* operator->() const { return *InternalItr; } + + inline _Self& operator++() { + do ++InternalItr; + while (!InternalItr.AtEnd() && + InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft); + + return *this; + } + + inline _Self& operator--() { + do --InternalItr; + while (!InternalItr.AtBeginning() && + InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft); + + return *this; + } + + inline void SkipSubTree() { + InternalItr.SkipToParent(); + + while (!InternalItr.AtEnd() && + InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft) + ++InternalItr; + } +}; + +//===----------------------------------------------------------------------===// +// Trait classes for Profile information. +//===----------------------------------------------------------------------===// + +/// Generic profile template. The default behavior is to invoke the +/// profile method of an object. Specializations for primitive integers +/// and generic handling of pointers is done below. +template <typename T> +struct ImutProfileInfo { + typedef const T value_type; + typedef const T& value_type_ref; + + static inline void Profile(FoldingSetNodeID& ID, value_type_ref X) { + FoldingSetTrait<T>::Profile(X,ID); + } +}; + +/// Profile traits for integers. +template <typename T> +struct ImutProfileInteger { + typedef const T value_type; + typedef const T& value_type_ref; + + static inline void Profile(FoldingSetNodeID& ID, value_type_ref X) { + ID.AddInteger(X); + } +}; + +#define PROFILE_INTEGER_INFO(X)\ +template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {}; + +PROFILE_INTEGER_INFO(char) +PROFILE_INTEGER_INFO(unsigned char) +PROFILE_INTEGER_INFO(short) +PROFILE_INTEGER_INFO(unsigned short) +PROFILE_INTEGER_INFO(unsigned) +PROFILE_INTEGER_INFO(signed) +PROFILE_INTEGER_INFO(long) +PROFILE_INTEGER_INFO(unsigned long) +PROFILE_INTEGER_INFO(long long) +PROFILE_INTEGER_INFO(unsigned long long) + +#undef PROFILE_INTEGER_INFO + +/// Generic profile trait for pointer types. We treat pointers as +/// references to unique objects. +template <typename T> +struct ImutProfileInfo<T*> { + typedef const T* value_type; + typedef value_type value_type_ref; + + static inline void Profile(FoldingSetNodeID &ID, value_type_ref X) { + ID.AddPointer(X); + } +}; + +//===----------------------------------------------------------------------===// +// Trait classes that contain element comparison operators and type +// definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap. These +// inherit from the profile traits (ImutProfileInfo) to include operations +// for element profiling. +//===----------------------------------------------------------------------===// + + +/// ImutContainerInfo - Generic definition of comparison operations for +/// elements of immutable containers that defaults to using +/// std::equal_to<> and std::less<> to perform comparison of elements. 
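+///
+/// A short illustration of these defaults (variable names are arbitrary):
+///
+///   ImmutableSet<int>::Factory F;          // picks up ImutContainerInfo<int>
+///   ImmutableSet<int> S = F.GetEmptySet();
+///   S = F.Add(S, 42);                      // ordered via std::less<int>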
+template <typename T> +struct ImutContainerInfo : public ImutProfileInfo<T> { + typedef typename ImutProfileInfo<T>::value_type value_type; + typedef typename ImutProfileInfo<T>::value_type_ref value_type_ref; + typedef value_type key_type; + typedef value_type_ref key_type_ref; + typedef bool data_type; + typedef bool data_type_ref; + + static inline key_type_ref KeyOfValue(value_type_ref D) { return D; } + static inline data_type_ref DataOfValue(value_type_ref) { return true; } + + static inline bool isEqual(key_type_ref LHS, key_type_ref RHS) { + return std::equal_to<key_type>()(LHS,RHS); + } + + static inline bool isLess(key_type_ref LHS, key_type_ref RHS) { + return std::less<key_type>()(LHS,RHS); + } + + static inline bool isDataEqual(data_type_ref,data_type_ref) { return true; } +}; + +/// ImutContainerInfo - Specialization for pointer values to treat pointers +/// as references to unique objects. Pointers are thus compared by +/// their addresses. +template <typename T> +struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> { + typedef typename ImutProfileInfo<T*>::value_type value_type; + typedef typename ImutProfileInfo<T*>::value_type_ref value_type_ref; + typedef value_type key_type; + typedef value_type_ref key_type_ref; + typedef bool data_type; + typedef bool data_type_ref; + + static inline key_type_ref KeyOfValue(value_type_ref D) { return D; } + static inline data_type_ref DataOfValue(value_type_ref) { return true; } + + static inline bool isEqual(key_type_ref LHS, key_type_ref RHS) { + return LHS == RHS; + } + + static inline bool isLess(key_type_ref LHS, key_type_ref RHS) { + return LHS < RHS; + } + + static inline bool isDataEqual(data_type_ref,data_type_ref) { return true; } +}; + +//===----------------------------------------------------------------------===// +// Immutable Set +//===----------------------------------------------------------------------===// + +template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> > +class ImmutableSet { +public: + typedef typename ValInfo::value_type value_type; + typedef typename ValInfo::value_type_ref value_type_ref; + typedef ImutAVLTree<ValInfo> TreeTy; + +private: + TreeTy* Root; + +public: + /// Constructs a set from a pointer to a tree root. In general one + /// should use a Factory object to create sets instead of directly + /// invoking the constructor, but there are cases where make this + /// constructor public is useful. + explicit ImmutableSet(TreeTy* R) : Root(R) {} + + class Factory { + typename TreeTy::Factory F; + + public: + Factory() {} + + Factory(BumpPtrAllocator& Alloc) + : F(Alloc) {} + + /// GetEmptySet - Returns an immutable set that contains no elements. + ImmutableSet GetEmptySet() { return ImmutableSet(F.GetEmptyTree()); } + + /// Add - Creates a new immutable set that contains all of the values + /// of the original set with the addition of the specified value. If + /// the original set already included the value, then the original set is + /// returned and no memory is allocated. The time and space complexity + /// of this operation is logarithmic in the size of the original set. + /// The memory allocated to represent the set is released when the + /// factory object that created the set is destroyed. + ImmutableSet Add(ImmutableSet Old, value_type_ref V) { + return ImmutableSet(F.Add(Old.Root,V)); + } + + /// Remove - Creates a new immutable set that contains all of the values + /// of the original set with the exception of the specified value. 
If + /// the original set did not contain the value, the original set is + /// returned and no memory is allocated. The time and space complexity + /// of this operation is logarithmic in the size of the original set. + /// The memory allocated to represent the set is released when the + /// factory object that created the set is destroyed. + ImmutableSet Remove(ImmutableSet Old, value_type_ref V) { + return ImmutableSet(F.Remove(Old.Root,V)); + } + + BumpPtrAllocator& getAllocator() { return F.getAllocator(); } + + private: + Factory(const Factory& RHS) {}; + void operator=(const Factory& RHS) {}; + }; + + friend class Factory; + + /// contains - Returns true if the set contains the specified value. + bool contains(const value_type_ref V) const { + return Root ? Root->contains(V) : false; + } + + bool operator==(ImmutableSet RHS) const { + return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; + } + + bool operator!=(ImmutableSet RHS) const { + return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; + } + + TreeTy* getRoot() const { return Root; } + + /// isEmpty - Return true if the set contains no elements. + bool isEmpty() const { return !Root; } + + /// isSingleton - Return true if the set contains exactly one element. + /// This method runs in constant time. + bool isSingleton() const { return getHeight() == 1; } + + template <typename Callback> + void foreach(Callback& C) { if (Root) Root->foreach(C); } + + template <typename Callback> + void foreach() { if (Root) { Callback C; Root->foreach(C); } } + + //===--------------------------------------------------===// + // Iterators. + //===--------------------------------------------------===// + + class iterator { + typename TreeTy::iterator itr; + + iterator() {} + iterator(TreeTy* t) : itr(t) {} + friend class ImmutableSet<ValT,ValInfo>; + public: + inline value_type_ref operator*() const { return itr->getValue(); } + inline iterator& operator++() { ++itr; return *this; } + inline iterator operator++(int) { iterator tmp(*this); ++itr; return tmp; } + inline iterator& operator--() { --itr; return *this; } + inline iterator operator--(int) { iterator tmp(*this); --itr; return tmp; } + inline bool operator==(const iterator& RHS) const { return RHS.itr == itr; } + inline bool operator!=(const iterator& RHS) const { return RHS.itr != itr; } + inline value_type *operator->() const { return &(operator*()); } + }; + + iterator begin() const { return iterator(Root); } + iterator end() const { return iterator(); } + + //===--------------------------------------------------===// + // Utility methods. + //===--------------------------------------------------===// + + inline unsigned getHeight() const { return Root ? Root->getHeight() : 0; } + + static inline void Profile(FoldingSetNodeID& ID, const ImmutableSet& S) { + ID.AddPointer(S.Root); + } + + inline void Profile(FoldingSetNodeID& ID) const { + return Profile(ID,*this); + } + + //===--------------------------------------------------===// + // For testing. 
+ //===--------------------------------------------------===// + + void verify() const { if (Root) Root->verify(); } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/IndexedMap.h b/include/llvm/ADT/IndexedMap.h new file mode 100644 index 0000000..ff5d3a1 --- /dev/null +++ b/include/llvm/ADT/IndexedMap.h @@ -0,0 +1,75 @@ +//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements an indexed map. The index map template takes two +// types. The first is the mapped type and the second is a functor +// that maps its argument to a size_t. On instantiation a "null" value +// can be provided to be used as a "does not exist" indicator in the +// map. A member function grow() is provided that given the value of +// the maximally indexed key (the argument of the functor) makes sure +// the map has enough space for it. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_INDEXEDMAP_H +#define LLVM_ADT_INDEXEDMAP_H + +#include <cassert> +#include <functional> +#include <vector> + +namespace llvm { + + struct IdentityFunctor : std::unary_function<unsigned, unsigned> { + unsigned operator()(unsigned Index) const { + return Index; + } + }; + + template <typename T, typename ToIndexT = IdentityFunctor> + class IndexedMap { + typedef typename ToIndexT::argument_type IndexT; + typedef std::vector<T> StorageT; + StorageT storage_; + T nullVal_; + ToIndexT toIndex_; + + public: + IndexedMap() : nullVal_(T()) { } + + explicit IndexedMap(const T& val) : nullVal_(val) { } + + typename StorageT::reference operator[](IndexT n) { + assert(toIndex_(n) < storage_.size() && "index out of bounds!"); + return storage_[toIndex_(n)]; + } + + typename StorageT::const_reference operator[](IndexT n) const { + assert(toIndex_(n) < storage_.size() && "index out of bounds!"); + return storage_[toIndex_(n)]; + } + + void clear() { + storage_.clear(); + } + + void grow(IndexT n) { + unsigned NewSize = toIndex_(n) + 1; + if (NewSize > storage_.size()) + storage_.resize(NewSize, nullVal_); + } + + typename StorageT::size_type size() const { + return storage_.size(); + } + }; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/IntrusiveRefCntPtr.h b/include/llvm/ADT/IntrusiveRefCntPtr.h new file mode 100644 index 0000000..37d4ac9 --- /dev/null +++ b/include/llvm/ADT/IntrusiveRefCntPtr.h @@ -0,0 +1,230 @@ +//== llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer ---*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines IntrusiveRefCntPtr, a template class that +// implements a "smart" pointer for objects that maintain their own +// internal reference count, and RefCountedBase/RefCountedBaseVPTR, two +// generic base classes for objects that wish to have their lifetimes +// managed using reference counting. +// +// IntrusiveRefCntPtr is similar to Boost's intrusive_ptr with added +// LLVM-style casting. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_INTRUSIVE_REF_CNT_PTR +#define LLVM_ADT_INTRUSIVE_REF_CNT_PTR + +#include <cassert> + +#include "llvm/Support/Casting.h" + +namespace llvm { + + template <class T> + class IntrusiveRefCntPtr; + +//===----------------------------------------------------------------------===// +/// RefCountedBase - A generic base class for objects that wish to +/// have their lifetimes managed using reference counts. Classes +/// subclass RefCountedBase to obtain such functionality, and are +/// typically handled with IntrusivePtr "smart pointers" (see below) +/// which automatically handle the management of reference counts. +/// Objects that subclass RefCountedBase should not be allocated on +/// the stack, as invoking "delete" (which is called when the +/// reference count hits 0) on such objects is an error. +//===----------------------------------------------------------------------===// + template <class Derived> + class RefCountedBase { + unsigned ref_cnt; + + protected: + RefCountedBase() : ref_cnt(0) {} + + void Retain() { ++ref_cnt; } + void Release() { + assert (ref_cnt > 0 && "Reference count is already zero."); + if (--ref_cnt == 0) delete static_cast<Derived*>(this); + } + + friend class IntrusiveRefCntPtr<Derived>; + }; + +//===----------------------------------------------------------------------===// +/// RefCountedBaseVPTR - A class that has the same function as +/// RefCountedBase, but with a virtual destructor. Should be used +/// instead of RefCountedBase for classes that already have virtual +/// methods to enforce dynamic allocation via 'new'. Classes that +/// inherit from RefCountedBaseVPTR can't be allocated on stack - +/// attempting to do this will produce a compile error. +//===----------------------------------------------------------------------===// + template <class Derived> + class RefCountedBaseVPTR { + unsigned ref_cnt; + + protected: + RefCountedBaseVPTR() : ref_cnt(0) {} + virtual ~RefCountedBaseVPTR() {} + + void Retain() { ++ref_cnt; } + void Release() { + assert (ref_cnt > 0 && "Reference count is already zero."); + if (--ref_cnt == 0) delete this; + } + + friend class IntrusiveRefCntPtr<Derived>; + }; + +//===----------------------------------------------------------------------===// +/// IntrusiveRefCntPtr - A template class that implements a "smart pointer" +/// that assumes the wrapped object has a reference count associated +/// with it that can be managed via calls to +/// IntrusivePtrAddRef/IntrusivePtrRelease. The smart pointers +/// manage reference counts via the RAII idiom: upon creation of +/// smart pointer the reference count of the wrapped object is +/// incremented and upon destruction of the smart pointer the +/// reference count is decremented. This class also safely handles +/// wrapping NULL pointers. +/// +/// Reference counting is implemented via calls to +/// Obj->Retain()/Obj->Release(). Release() is required to destroy +/// the object when the reference count reaches zero. Inheriting from +/// RefCountedBase/RefCountedBaseVPTR takes care of this +/// automatically. 
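+///
+/// A minimal, illustrative sketch (the class 'MyNode' is hypothetical):
+///
+///   class MyNode : public RefCountedBase<MyNode> { /* ... */ };
+///
+///   IntrusiveRefCntPtr<MyNode> P(new MyNode());   // count becomes 1
+///   IntrusiveRefCntPtr<MyNode> Q = P;             // count becomes 2
+///   // When both P and Q are destroyed the count reaches 0 and the object
+///   // deletes itself via Release().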
+//===----------------------------------------------------------------------===// + template <typename T> + class IntrusiveRefCntPtr { + T* Obj; + typedef IntrusiveRefCntPtr this_type; + public: + typedef T element_type; + + explicit IntrusiveRefCntPtr() : Obj(0) {} + + explicit IntrusiveRefCntPtr(T* obj) : Obj(obj) { + retain(); + } + + IntrusiveRefCntPtr(const IntrusiveRefCntPtr& S) : Obj(S.Obj) { + retain(); + } + + template <class X> + IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S) + : Obj(S.getPtr()) { + retain(); + } + + IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr& S) { + replace(S.getPtr()); + return *this; + } + + template <class X> + IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr<X>& S) { + replace(S.getPtr()); + return *this; + } + + IntrusiveRefCntPtr& operator=(T * S) { + replace(S); + return *this; + } + + ~IntrusiveRefCntPtr() { release(); } + + T& operator*() const { return *Obj; } + + T* operator->() const { return Obj; } + + T* getPtr() const { return Obj; } + + typedef T* (IntrusiveRefCntPtr::*unspecified_bool_type) () const; + operator unspecified_bool_type() const { + return Obj == 0 ? 0 : &IntrusiveRefCntPtr::getPtr; + } + + void swap(IntrusiveRefCntPtr& other) { + T* tmp = other.Obj; + other.Obj = Obj; + Obj = tmp; + } + + private: + void retain() { if (Obj) Obj->Retain(); } + void release() { if (Obj) Obj->Release(); } + + void replace(T* S) { + this_type(S).swap(*this); + } + }; + + template<class T, class U> + inline bool operator==(const IntrusiveRefCntPtr<T>& A, + const IntrusiveRefCntPtr<U>& B) + { + return A.getPtr() == B.getPtr(); + } + + template<class T, class U> + inline bool operator!=(const IntrusiveRefCntPtr<T>& A, + const IntrusiveRefCntPtr<U>& B) + { + return A.getPtr() != B.getPtr(); + } + + template<class T, class U> + inline bool operator==(const IntrusiveRefCntPtr<T>& A, + U* B) + { + return A.getPtr() == B; + } + + template<class T, class U> + inline bool operator!=(const IntrusiveRefCntPtr<T>& A, + U* B) + { + return A.getPtr() != B; + } + + template<class T, class U> + inline bool operator==(T* A, + const IntrusiveRefCntPtr<U>& B) + { + return A == B.getPtr(); + } + + template<class T, class U> + inline bool operator!=(T* A, + const IntrusiveRefCntPtr<U>& B) + { + return A != B.getPtr(); + } + +//===----------------------------------------------------------------------===// +// LLVM-style downcasting support for IntrusiveRefCntPtr objects +//===----------------------------------------------------------------------===// + + template<class T> struct simplify_type<IntrusiveRefCntPtr<T> > { + typedef T* SimpleType; + static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T>& Val) { + return Val.getPtr(); + } + }; + + template<class T> struct simplify_type<const IntrusiveRefCntPtr<T> > { + typedef T* SimpleType; + static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T>& Val) { + return Val.getPtr(); + } + }; + +} // end namespace llvm + +#endif // LLVM_ADT_INTRUSIVE_REF_CNT_PTR diff --git a/include/llvm/ADT/OwningPtr.h b/include/llvm/ADT/OwningPtr.h new file mode 100644 index 0000000..cc53c8c --- /dev/null +++ b/include/llvm/ADT/OwningPtr.h @@ -0,0 +1,134 @@ +//===- llvm/ADT/OwningPtr.h - Smart ptr that owns the pointee ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines and implements the OwningPtr class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_OWNING_PTR_H +#define LLVM_ADT_OWNING_PTR_H + +#include <cassert> +#include <cstddef> + +namespace llvm { + +/// OwningPtr smart pointer - OwningPtr mimics a built-in pointer except that it +/// guarantees deletion of the object pointed to, either on destruction of the +/// OwningPtr or via an explicit reset(). Once created, ownership of the +/// pointee object can be taken away from OwningPtr by using the take method. +template<class T> +class OwningPtr { + OwningPtr(OwningPtr const &); // DO NOT IMPLEMENT + OwningPtr &operator=(OwningPtr const &); // DO NOT IMPLEMENT + T *Ptr; +public: + explicit OwningPtr(T *P = 0) : Ptr(P) {} + + ~OwningPtr() { + delete Ptr; + } + + /// reset - Change the current pointee to the specified pointer. Note that + /// calling this with any pointer (including a null pointer) deletes the + /// current pointer. + void reset(T *P = 0) { + if (P == Ptr) return; + T *Tmp = Ptr; + Ptr = P; + delete Tmp; + } + + /// take - Reset the owning pointer to null and return its pointer. This does + /// not delete the pointer before returning it. + T *take() { + T *Tmp = Ptr; + Ptr = 0; + return Tmp; + } + + T &operator*() const { + assert(Ptr && "Cannot dereference null pointer"); + return *Ptr; + } + + T *operator->() const { return Ptr; } + T *get() const { return Ptr; } + operator bool() const { return Ptr != 0; } + bool operator!() const { return Ptr == 0; } + + void swap(OwningPtr &RHS) { + T *Tmp = RHS.Ptr; + RHS.Ptr = Ptr; + Ptr = Tmp; + } +}; + +template<class T> +inline void swap(OwningPtr<T> &a, OwningPtr<T> &b) { + a.swap(b); +} + +/// OwningArrayPtr smart pointer - OwningArrayPtr provides the same +/// functionality as OwningPtr, except that it works for array types. +template<class T> +class OwningArrayPtr { + OwningArrayPtr(OwningArrayPtr const &); // DO NOT IMPLEMENT + OwningArrayPtr &operator=(OwningArrayPtr const &); // DO NOT IMPLEMENT + T *Ptr; +public: + explicit OwningArrayPtr(T *P = 0) : Ptr(P) {} + + ~OwningArrayPtr() { + delete [] Ptr; + } + + /// reset - Change the current pointee to the specified pointer. Note that + /// calling this with any pointer (including a null pointer) deletes the + /// current pointer. + void reset(T *P = 0) { + if (P == Ptr) return; + T *Tmp = Ptr; + Ptr = P; + delete [] Tmp; + } + + /// take - Reset the owning pointer to null and return its pointer. This does + /// not delete the pointer before returning it. 
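+  ///
+  /// For example (illustrative; 'N' stands for any array length):
+  ///   OwningArrayPtr<char> Buf(new char[N]);
+  ///   char *Raw = Buf.take();  // Buf gives up ownership without deleting
+  ///   // the caller is now responsible for 'delete [] Raw'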
+ T *take() { + T *Tmp = Ptr; + Ptr = 0; + return Tmp; + } + + T &operator[](std::ptrdiff_t i) const { + assert(Ptr && "Cannot dereference null pointer"); + return Ptr[i]; + } + + T *get() const { return Ptr; } + operator bool() const { return Ptr != 0; } + bool operator!() const { return Ptr == 0; } + + void swap(OwningArrayPtr &RHS) { + T *Tmp = RHS.Ptr; + RHS.Ptr = Ptr; + Ptr = Tmp; + } +}; + +template<class T> +inline void swap(OwningArrayPtr<T> &a, OwningArrayPtr<T> &b) { + a.swap(b); +} + + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/PointerIntPair.h b/include/llvm/ADT/PointerIntPair.h new file mode 100644 index 0000000..0aa478b --- /dev/null +++ b/include/llvm/ADT/PointerIntPair.h @@ -0,0 +1,150 @@ +//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PointerIntPair class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_POINTERINTPAIR_H +#define LLVM_ADT_POINTERINTPAIR_H + +#include "llvm/Support/PointerLikeTypeTraits.h" +#include <cassert> + +namespace llvm { + +template<typename T> +struct DenseMapInfo; + +/// PointerIntPair - This class implements a pair of a pointer and small +/// integer. It is designed to represent this in the space required by one +/// pointer by bitmangling the integer into the low part of the pointer. This +/// can only be done for small integers: typically up to 3 bits, but it depends +/// on the number of bits available according to PointerLikeTypeTraits for the +/// type. +/// +/// Note that PointerIntPair always puts the Int part in the highest bits +/// possible. For example, PointerIntPair<void*, 1, bool> will put the bit for +/// the bool into bit #2, not bit #0, which allows the low two bits to be used +/// for something else. For example, this allows: +/// PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool> +/// ... and the two bools will land in different bits. +/// +template <typename PointerTy, unsigned IntBits, typename IntType=unsigned, + typename PtrTraits = PointerLikeTypeTraits<PointerTy> > +class PointerIntPair { + intptr_t Value; + enum { + /// PointerBitMask - The bits that come from the pointer. + PointerBitMask = + ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable)-1), + + /// IntShift - The number of low bits that we reserve for other uses, and + /// keep zero. + IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable-IntBits, + + /// IntMask - This is the unshifted mask for valid bits of the int type. + IntMask = (uintptr_t)(((intptr_t)1 << IntBits)-1), + + // ShiftedIntMask - This is the bits for the integer shifted in place. 
+ ShiftedIntMask = (uintptr_t)(IntMask << IntShift) + }; +public: + PointerIntPair() : Value(0) {} + PointerIntPair(PointerTy Ptr, IntType Int) : Value(0) { + assert(IntBits <= PtrTraits::NumLowBitsAvailable && + "PointerIntPair formed with integer size too large for pointer"); + setPointer(Ptr); + setInt(Int); + } + + PointerTy getPointer() const { + return reinterpret_cast<PointerTy>(Value & PointerBitMask); + } + + IntType getInt() const { + return (IntType)((Value >> IntShift) & IntMask); + } + + void setPointer(PointerTy Ptr) { + intptr_t PtrVal = reinterpret_cast<intptr_t>(Ptr); + assert((PtrVal & ((1 << PtrTraits::NumLowBitsAvailable)-1)) == 0 && + "Pointer is not sufficiently aligned"); + // Preserve all low bits, just update the pointer. + Value = PtrVal | (Value & ~PointerBitMask); + } + + void setInt(IntType Int) { + intptr_t IntVal = Int; + assert(IntVal < (1 << IntBits) && "Integer too large for field"); + + // Preserve all bits other than the ones we are updating. + Value &= ~ShiftedIntMask; // Remove integer field. + Value |= IntVal << IntShift; // Set new integer. + } + + void *getOpaqueValue() const { return reinterpret_cast<void*>(Value); } + void setFromOpaqueValue(void *Val) { Value = reinterpret_cast<intptr_t>(Val);} + + static PointerIntPair getFromOpaqueValue(void *V) { + PointerIntPair P; P.setFromOpaqueValue(V); return P; + } + + bool operator==(const PointerIntPair &RHS) const {return Value == RHS.Value;} + bool operator!=(const PointerIntPair &RHS) const {return Value != RHS.Value;} + bool operator<(const PointerIntPair &RHS) const {return Value < RHS.Value;} + bool operator>(const PointerIntPair &RHS) const {return Value > RHS.Value;} + bool operator<=(const PointerIntPair &RHS) const {return Value <= RHS.Value;} + bool operator>=(const PointerIntPair &RHS) const {return Value >= RHS.Value;} +}; + +// Provide specialization of DenseMapInfo for PointerIntPair. +template<typename PointerTy, unsigned IntBits, typename IntType> +struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType> > { + typedef PointerIntPair<PointerTy, IntBits, IntType> Ty; + static Ty getEmptyKey() { + intptr_t Val = -1; + Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable; + return Ty(reinterpret_cast<PointerTy>(Val), IntType((1 << IntBits)-1)); + } + static Ty getTombstoneKey() { + intptr_t Val = -2; + Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable; + return Ty(reinterpret_cast<PointerTy>(Val), IntType(0)); + } + static unsigned getHashValue(Ty V) { + uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue()); + return unsigned(IV) ^ unsigned(IV >> 9); + } + static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; } + static bool isPod() { return true; } +}; + +// Teach SmallPtrSet that PointerIntPair is "basically a pointer". 
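+// For instance (illustrative), a PointerIntPair<void*, 1, bool> round-trips
+// through the getAsVoidPointer()/getFromVoidPointer() methods below with its
+// packed bool intact, which is what pointer-keyed containers rely on.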
+template<typename PointerTy, unsigned IntBits, typename IntType, + typename PtrTraits> +class PointerLikeTypeTraits<PointerIntPair<PointerTy, IntBits, IntType, + PtrTraits> > { +public: + static inline void * + getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) { + return P.getOpaqueValue(); + } + static inline PointerIntPair<PointerTy, IntBits, IntType> + getFromVoidPointer(void *P) { + return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P); + } + enum { + NumLowBitsAvailable = + PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable - IntBits + }; +}; + +} // end namespace llvm +#endif diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h new file mode 100644 index 0000000..b3baec1 --- /dev/null +++ b/include/llvm/ADT/PointerUnion.h @@ -0,0 +1,259 @@ +//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PointerUnion class, which is a discriminated union of +// pointer types. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_POINTERUNION_H +#define LLVM_ADT_POINTERUNION_H + +#include "llvm/ADT/PointerIntPair.h" + +namespace llvm { + + /// getPointerUnionTypeNum - If the argument has type PT1* or PT2* return + /// false or true respectively. + template <typename PT1, typename PT2> + static inline int getPointerUnionTypeNum(PT1 *P) { return 0; } + template <typename PT1, typename PT2> + static inline int getPointerUnionTypeNum(PT2 *P) { return 1; } + template <typename PT1, typename PT2> + static inline int getPointerUnionTypeNum(...) { return -1; } + + + /// Provide PointerLikeTypeTraits for void* that is used by PointerUnion + /// for the two template arguments. + template <typename PT1, typename PT2> + class PointerUnionUIntTraits { + public: + static inline void *getAsVoidPointer(void *P) { return P; } + static inline void *getFromVoidPointer(void *P) { return P; } + enum { + PT1BitsAv = PointerLikeTypeTraits<PT1>::NumLowBitsAvailable, + PT2BitsAv = PointerLikeTypeTraits<PT2>::NumLowBitsAvailable, + NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv + }; + }; + + /// PointerUnion - This implements a discriminated union of two pointer types, + /// and keeps the discriminator bit-mangled into the low bits of the pointer. + /// This allows the implementation to be extremely efficient in space, but + /// permits a very natural and type-safe API. + /// + /// Common use patterns would be something like this: + /// PointerUnion<int*, float*> P; + /// P = (int*)0; + /// printf("%d %d", P.is<int*>(), P.is<float*>()); // prints "1 0" + /// X = P.get<int*>(); // ok. + /// Y = P.get<float*>(); // runtime assertion failure. + /// Z = P.get<double*>(); // runtime assertion failure (regardless of tag) + /// P = (float*)0; + /// Y = P.get<float*>(); // ok. + /// X = P.get<int*>(); // runtime assertion failure. 
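+  ///    // The accessors below follow the same pattern (illustrative):
+  ///    if (int *IP = P.dyn_cast<int*>())
+  ///      ...                        // only reached when P holds an int*
+  ///    if (P.isNull())
+  ///      ...                        // the held pointer is null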
+ template <typename PT1, typename PT2> + class PointerUnion { + public: + typedef PointerIntPair<void*, 1, bool, + PointerUnionUIntTraits<PT1,PT2> > ValTy; + private: + ValTy Val; + public: + PointerUnion() {} + + PointerUnion(PT1 V) { + Val.setPointer( + const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(V))); + Val.setInt(0); + } + PointerUnion(PT2 V) { + Val.setPointer( + const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(V))); + Val.setInt(1); + } + + /// isNull - Return true if the pointer help in the union is null, + /// regardless of which type it is. + bool isNull() const { return Val.getPointer() == 0; } + operator bool() const { return !isNull(); } + + /// is<T>() return true if the Union currently holds the type matching T. + template<typename T> + int is() const { + int TyNo = ::llvm::getPointerUnionTypeNum<PT1, PT2>((T*)0); + assert(TyNo != -1 && "Type query could never succeed on PointerUnion!"); + return Val.getInt() == TyNo; + } + + /// get<T>() - Return the value of the specified pointer type. If the + /// specified pointer type is incorrect, assert. + template<typename T> + T get() const { + assert(is<T>() && "Invalid accessor called"); + return PointerLikeTypeTraits<T>::getFromVoidPointer(Val.getPointer()); + } + + /// dyn_cast<T>() - If the current value is of the specified pointer type, + /// return it, otherwise return null. + template<typename T> + T dyn_cast() const { + if (is<T>()) return get<T>(); + return T(); + } + + /// Assignment operators - Allow assigning into this union from either + /// pointer type, setting the discriminator to remember what it came from. + const PointerUnion &operator=(const PT1 &RHS) { + Val.setPointer( + const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(RHS))); + Val.setInt(0); + return *this; + } + const PointerUnion &operator=(const PT2 &RHS) { + Val.setPointer( + const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(RHS))); + Val.setInt(1); + return *this; + } + + void *getOpaqueValue() const { return Val.getOpaqueValue(); } + static PointerUnion getFromOpaqueValue(void *VP) { + PointerUnion V; + V.Val = ValTy::getFromOpaqueValue(VP); + return V; + } + }; + + // Teach SmallPtrSet that PointerUnion is "basically a pointer", that has + // # low bits available = min(PT1bits,PT2bits)-1. + template<typename PT1, typename PT2> + class PointerLikeTypeTraits<PointerUnion<PT1, PT2> > { + public: + static inline void * + getAsVoidPointer(const PointerUnion<PT1, PT2> &P) { + return P.getOpaqueValue(); + } + static inline PointerUnion<PT1, PT2> + getFromVoidPointer(void *P) { + return PointerUnion<PT1, PT2>::getFromOpaqueValue(P); + } + + // The number of bits available are the min of the two pointer types. + enum { + NumLowBitsAvailable = + PointerLikeTypeTraits<typename PointerUnion<PT1,PT2>::ValTy> + ::NumLowBitsAvailable + }; + }; + + + /// PointerUnion3 - This is a pointer union of three pointer types. See + /// documentation for PointerUnion for usage. + template <typename PT1, typename PT2, typename PT3> + class PointerUnion3 { + public: + typedef PointerUnion<PT1, PT2> InnerUnion; + typedef PointerUnion<InnerUnion, PT3> ValTy; + private: + ValTy Val; + public: + PointerUnion3() {} + + PointerUnion3(PT1 V) { + Val = InnerUnion(V); + } + PointerUnion3(PT2 V) { + Val = InnerUnion(V); + } + PointerUnion3(PT3 V) { + Val = V; + } + + /// isNull - Return true if the pointer help in the union is null, + /// regardless of which type it is. 
+ bool isNull() const { return Val.isNull(); } + operator bool() const { return !isNull(); } + + /// is<T>() return true if the Union currently holds the type matching T. + template<typename T> + int is() const { + // Is it PT1/PT2? + if (::llvm::getPointerUnionTypeNum<PT1, PT2>((T*)0) != -1) + return Val.is<InnerUnion>() && Val.get<InnerUnion>().is<T>(); + return Val.is<T>(); + } + + /// get<T>() - Return the value of the specified pointer type. If the + /// specified pointer type is incorrect, assert. + template<typename T> + T get() const { + assert(is<T>() && "Invalid accessor called"); + // Is it PT1/PT2? + if (::llvm::getPointerUnionTypeNum<PT1, PT2>((T*)0) != -1) + return Val.get<InnerUnion>().get<T>(); + + return Val.get<T>(); + } + + /// dyn_cast<T>() - If the current value is of the specified pointer type, + /// return it, otherwise return null. + template<typename T> + T dyn_cast() const { + if (is<T>()) return get<T>(); + return T(); + } + + /// Assignment operators - Allow assigning into this union from either + /// pointer type, setting the discriminator to remember what it came from. + const PointerUnion3 &operator=(const PT1 &RHS) { + Val = InnerUnion(RHS); + return *this; + } + const PointerUnion3 &operator=(const PT2 &RHS) { + Val = InnerUnion(RHS); + return *this; + } + const PointerUnion3 &operator=(const PT3 &RHS) { + Val = RHS; + return *this; + } + + void *getOpaqueValue() const { return Val.getOpaqueValue(); } + static PointerUnion3 getFromOpaqueValue(void *VP) { + PointerUnion3 V; + V.Val = ValTy::getFromOpaqueValue(VP); + return V; + } + }; + + // Teach SmallPtrSet that PointerUnion3 is "basically a pointer", that has + // # low bits available = min(PT1bits,PT2bits,PT2bits)-2. + template<typename PT1, typename PT2, typename PT3> + class PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3> > { + public: + static inline void * + getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) { + return P.getOpaqueValue(); + } + static inline PointerUnion3<PT1, PT2, PT3> + getFromVoidPointer(void *P) { + return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P); + } + + // The number of bits available are the min of the two pointer types. + enum { + NumLowBitsAvailable = + PointerLikeTypeTraits<typename PointerUnion3<PT1, PT2, PT3>::ValTy> + ::NumLowBitsAvailable + }; + }; +} + +#endif diff --git a/include/llvm/ADT/PostOrderIterator.h b/include/llvm/ADT/PostOrderIterator.h new file mode 100644 index 0000000..bf7ce9d --- /dev/null +++ b/include/llvm/ADT/PostOrderIterator.h @@ -0,0 +1,231 @@ +//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file builds on the ADT/GraphTraits.h file to build a generic graph +// post order iterator. This should work over any graph type that has a +// GraphTraits specialization. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_POSTORDERITERATOR_H +#define LLVM_ADT_POSTORDERITERATOR_H + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/iterator.h" +#include <set> +#include <stack> +#include <vector> + +namespace llvm { + +template<class SetType, bool External> // Non-external set +class po_iterator_storage { +public: + SetType Visited; +}; + +template<class SetType> +class po_iterator_storage<SetType, true> { +public: + po_iterator_storage(SetType &VSet) : Visited(VSet) {} + po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {} + SetType &Visited; +}; + +template<class GraphT, + class SetType = std::set<typename GraphTraits<GraphT>::NodeType*>, + bool ExtStorage = false, + class GT = GraphTraits<GraphT> > +class po_iterator : public forward_iterator<typename GT::NodeType, ptrdiff_t>, + public po_iterator_storage<SetType, ExtStorage> { + typedef forward_iterator<typename GT::NodeType, ptrdiff_t> super; + typedef typename GT::NodeType NodeType; + typedef typename GT::ChildIteratorType ChildItTy; + + // VisitStack - Used to maintain the ordering. Top = current block + // First element is basic block pointer, second is the 'next child' to visit + std::stack<std::pair<NodeType *, ChildItTy> > VisitStack; + + void traverseChild() { + while (VisitStack.top().second != GT::child_end(VisitStack.top().first)) { + NodeType *BB = *VisitStack.top().second++; + if (!this->Visited.count(BB)) { // If the block is not visited... + this->Visited.insert(BB); + VisitStack.push(std::make_pair(BB, GT::child_begin(BB))); + } + } + } + + inline po_iterator(NodeType *BB) { + this->Visited.insert(BB); + VisitStack.push(std::make_pair(BB, GT::child_begin(BB))); + traverseChild(); + } + inline po_iterator() {} // End is when stack is empty. + + inline po_iterator(NodeType *BB, SetType &S) : + po_iterator_storage<SetType, ExtStorage>(&S) { + if(!S.count(BB)) { + this->Visited.insert(BB); + VisitStack.push(std::make_pair(BB, GT::child_begin(BB))); + traverseChild(); + } + } + + inline po_iterator(SetType &S) : + po_iterator_storage<SetType, ExtStorage>(&S) { + } // End is when stack is empty. +public: + typedef typename super::pointer pointer; + typedef po_iterator<GraphT, SetType, ExtStorage, GT> _Self; + + // Provide static "constructors"... + static inline _Self begin(GraphT G) { return _Self(GT::getEntryNode(G)); } + static inline _Self end (GraphT G) { return _Self(); } + + static inline _Self begin(GraphT G, SetType &S) { + return _Self(GT::getEntryNode(G), S); + } + static inline _Self end (GraphT G, SetType &S) { return _Self(S); } + + inline bool operator==(const _Self& x) const { + return VisitStack == x.VisitStack; + } + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline pointer operator*() const { + return VisitStack.top().first; + } + + // This is a nonstandard operator-> that dereferences the pointer an extra + // time... so that you can actually call methods ON the BasicBlock, because + // the contained type is a pointer. This allows BBIt->getTerminator() f.e. + // + inline NodeType *operator->() const { return operator*(); } + + inline _Self& operator++() { // Preincrement + VisitStack.pop(); + if (!VisitStack.empty()) + traverseChild(); + return *this; + } + + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } +}; + +// Provide global constructors that automatically figure out correct types... 
+// +template <class T> +po_iterator<T> po_begin(T G) { return po_iterator<T>::begin(G); } +template <class T> +po_iterator<T> po_end (T G) { return po_iterator<T>::end(G); } + +// Provide global definitions of external postorder iterators... +template<class T, class SetType=std::set<typename GraphTraits<T>::NodeType*> > +struct po_ext_iterator : public po_iterator<T, SetType, true> { + po_ext_iterator(const po_iterator<T, SetType, true> &V) : + po_iterator<T, SetType, true>(V) {} +}; + +template<class T, class SetType> +po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) { + return po_ext_iterator<T, SetType>::begin(G, S); +} + +template<class T, class SetType> +po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) { + return po_ext_iterator<T, SetType>::end(G, S); +} + +// Provide global definitions of inverse post order iterators... +template <class T, + class SetType = std::set<typename GraphTraits<T>::NodeType*>, + bool External = false> +struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External > { + ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) : + po_iterator<Inverse<T>, SetType, External> (V) {} +}; + +template <class T> +ipo_iterator<T> ipo_begin(T G, bool Reverse = false) { + return ipo_iterator<T>::begin(G, Reverse); +} + +template <class T> +ipo_iterator<T> ipo_end(T G){ + return ipo_iterator<T>::end(G); +} + +//Provide global definitions of external inverse postorder iterators... +template <class T, + class SetType = std::set<typename GraphTraits<T>::NodeType*> > +struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> { + ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) : + ipo_iterator<T, SetType, true>(&V) {} + ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) : + ipo_iterator<T, SetType, true>(&V) {} +}; + +template <class T, class SetType> +ipo_ext_iterator<T, SetType> ipo_ext_begin(T G, SetType &S) { + return ipo_ext_iterator<T, SetType>::begin(G, S); +} + +template <class T, class SetType> +ipo_ext_iterator<T, SetType> ipo_ext_end(T G, SetType &S) { + return ipo_ext_iterator<T, SetType>::end(G, S); +} + +//===--------------------------------------------------------------------===// +// Reverse Post Order CFG iterator code +//===--------------------------------------------------------------------===// +// +// This is used to visit basic blocks in a method in reverse post order. This +// class is awkward to use because I don't know a good incremental algorithm to +// computer RPO from a graph. Because of this, the construction of the +// ReversePostOrderTraversal object is expensive (it must walk the entire graph +// with a postorder iterator to build the data structures). The moral of this +// story is: Don't create more ReversePostOrderTraversal classes than necessary. +// +// This class should be used like this: +// { +// ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create +// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) { +// ... +// } +// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) { +// ... 
+// } +// } +// + +template<class GraphT, class GT = GraphTraits<GraphT> > +class ReversePostOrderTraversal { + typedef typename GT::NodeType NodeType; + std::vector<NodeType*> Blocks; // Block list in normal PO order + inline void Initialize(NodeType *BB) { + copy(po_begin(BB), po_end(BB), back_inserter(Blocks)); + } +public: + typedef typename std::vector<NodeType*>::reverse_iterator rpo_iterator; + + inline ReversePostOrderTraversal(GraphT G) { + Initialize(GT::getEntryNode(G)); + } + + // Because we want a reverse post order, use reverse iterators from the vector + inline rpo_iterator begin() { return Blocks.rbegin(); } + inline rpo_iterator end() { return Blocks.rend(); } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/PriorityQueue.h b/include/llvm/ADT/PriorityQueue.h new file mode 100644 index 0000000..a8809dc --- /dev/null +++ b/include/llvm/ADT/PriorityQueue.h @@ -0,0 +1,83 @@ +//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PriorityQueue class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_PRIORITY_QUEUE_H +#define LLVM_ADT_PRIORITY_QUEUE_H + +#include <queue> + +namespace llvm { + +/// PriorityQueue - This class behaves like std::priority_queue and +/// provides a few additional convenience functions. +/// +template<class T, + class Sequence = std::vector<T>, + class Compare = std::less<typename Sequence::value_type> > +class PriorityQueue : public std::priority_queue<T, Sequence, Compare> { +public: + explicit PriorityQueue(const Compare &compare = Compare(), + const Sequence &sequence = Sequence()) + : std::priority_queue<T, Sequence, Compare>(compare, sequence) + {} + + template<class Iterator> + PriorityQueue(Iterator begin, Iterator end, + const Compare &compare = Compare(), + const Sequence &sequence = Sequence()) + : std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence) + {} + + /// erase_one - Erase one element from the queue, regardless of its + /// position. This operation performs a linear search to find an element + /// equal to t, but then uses all logarithmic-time algorithms to do + /// the erase operation. + /// + void erase_one(const T &t) { + // Linear-search to find the element. + typename Sequence::size_type i = + std::find(this->c.begin(), this->c.end(), t) - this->c.begin(); + + // Logarithmic-time heap bubble-up. + while (i != 0) { + typename Sequence::size_type parent = (i - 1) / 2; + this->c[i] = this->c[parent]; + i = parent; + } + + // The element we want to remove is now at the root, so we can use + // priority_queue's plain pop to remove it. + this->pop(); + } + + /// reheapify - If an element in the queue has changed in a way that + /// affects its standing in the comparison function, the queue's + /// internal state becomes invalid. Calling reheapify() resets the + /// queue's state, making it valid again. This operation has time + /// complexity proportional to the number of elements in the queue, + /// so don't plan to use it a lot. + /// + void reheapify() { + std::make_heap(this->c.begin(), this->c.end(), this->comp); + } + + /// clear - Erase all elements from the queue. 
+ /// + void clear() { + this->c.clear(); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/SCCIterator.h b/include/llvm/ADT/SCCIterator.h new file mode 100644 index 0000000..e28f4ca --- /dev/null +++ b/include/llvm/ADT/SCCIterator.h @@ -0,0 +1,199 @@ +//===-- Support/SCCIterator.h - Strongly Connected Comp. Iter. --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This builds on the llvm/ADT/GraphTraits.h file to find the strongly connected +// components (SCCs) of a graph in O(N+E) time using Tarjan's DFS algorithm. +// +// The SCC iterator has the important property that if a node in SCC S1 has an +// edge to a node in SCC S2, then it visits S1 *after* S2. +// +// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. +// (NOTE: This requires some simple wrappers and is not supported yet.) +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SCCITERATOR_H +#define LLVM_ADT_SCCITERATOR_H + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/iterator.h" +#include <map> +#include <vector> + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// scc_iterator - Enumerate the SCCs of a directed graph, in +/// reverse topological order of the SCC DAG. +/// +template<class GraphT, class GT = GraphTraits<GraphT> > +class scc_iterator + : public forward_iterator<std::vector<typename GT::NodeType>, ptrdiff_t> { + typedef typename GT::NodeType NodeType; + typedef typename GT::ChildIteratorType ChildItTy; + typedef std::vector<NodeType*> SccTy; + typedef forward_iterator<SccTy, ptrdiff_t> super; + typedef typename super::reference reference; + typedef typename super::pointer pointer; + + // The visit counters used to detect when a complete SCC is on the stack. + // visitNum is the global counter. + // nodeVisitNumbers are per-node visit numbers, also used as DFS flags. + unsigned visitNum; + std::map<NodeType *, unsigned> nodeVisitNumbers; + + // SCCNodeStack - Stack holding nodes of the SCC. + std::vector<NodeType *> SCCNodeStack; + + // CurrentSCC - The current SCC, retrieved using operator*(). + SccTy CurrentSCC; + + // VisitStack - Used to maintain the ordering. Top = current block + // First element is basic block pointer, second is the 'next child' to visit + std::vector<std::pair<NodeType *, ChildItTy> > VisitStack; + + // MinVistNumStack - Stack holding the "min" values for each node in the DFS. + // This is used to track the minimum uplink values for all children of + // the corresponding node on the VisitStack. + std::vector<unsigned> MinVisitNumStack; + + // A single "visit" within the non-recursive DFS traversal. + void DFSVisitOne(NodeType* N) { + ++visitNum; // Global counter for the visit order + nodeVisitNumbers[N] = visitNum; + SCCNodeStack.push_back(N); + MinVisitNumStack.push_back(visitNum); + VisitStack.push_back(std::make_pair(N, GT::child_begin(N))); + //DOUT << "TarjanSCC: Node " << N << + // " : visitNum = " << visitNum << "\n"; + } + + // The stack-based DFS traversal; defined below. 
+ void DFSVisitChildren() { + assert(!VisitStack.empty()); + while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) { + // TOS has at least one more child so continue DFS + NodeType *childN = *VisitStack.back().second++; + if (!nodeVisitNumbers.count(childN)) { + // this node has never been seen + DFSVisitOne(childN); + } else { + unsigned childNum = nodeVisitNumbers[childN]; + if (MinVisitNumStack.back() > childNum) + MinVisitNumStack.back() = childNum; + } + } + } + + // Compute the next SCC using the DFS traversal. + void GetNextSCC() { + assert(VisitStack.size() == MinVisitNumStack.size()); + CurrentSCC.clear(); // Prepare to compute the next SCC + while (!VisitStack.empty()) { + DFSVisitChildren(); + assert(VisitStack.back().second ==GT::child_end(VisitStack.back().first)); + NodeType* visitingN = VisitStack.back().first; + unsigned minVisitNum = MinVisitNumStack.back(); + VisitStack.pop_back(); + MinVisitNumStack.pop_back(); + if (!MinVisitNumStack.empty() && MinVisitNumStack.back() > minVisitNum) + MinVisitNumStack.back() = minVisitNum; + + //DOUT << "TarjanSCC: Popped node " << visitingN << + // " : minVisitNum = " << minVisitNum << "; Node visit num = " << + // nodeVisitNumbers[visitingN] << "\n"; + + if (minVisitNum == nodeVisitNumbers[visitingN]) { + // A full SCC is on the SCCNodeStack! It includes all nodes below + // visitingN on the stack. Copy those nodes to CurrentSCC, + // reset their minVisit values, and return (this suspends + // the DFS traversal till the next ++). + do { + CurrentSCC.push_back(SCCNodeStack.back()); + SCCNodeStack.pop_back(); + nodeVisitNumbers[CurrentSCC.back()] = ~0U; + } while (CurrentSCC.back() != visitingN); + return; + } + } + } + + inline scc_iterator(NodeType *entryN) : visitNum(0) { + DFSVisitOne(entryN); + GetNextSCC(); + } + inline scc_iterator() { /* End is when DFS stack is empty */ } + +public: + typedef scc_iterator<GraphT, GT> _Self; + + // Provide static "constructors"... + static inline _Self begin(GraphT& G) { return _Self(GT::getEntryNode(G)); } + static inline _Self end (GraphT& G) { return _Self(); } + + // Direct loop termination test (I.fini() is more efficient than I == end()) + inline bool fini() const { + assert(!CurrentSCC.empty() || VisitStack.empty()); + return CurrentSCC.empty(); + } + + inline bool operator==(const _Self& x) const { + return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC; + } + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + // Iterator traversal: forward iteration only + inline _Self& operator++() { // Preincrement + GetNextSCC(); + return *this; + } + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } + + // Retrieve a reference to the current SCC + inline const SccTy &operator*() const { + assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!"); + return CurrentSCC; + } + inline SccTy &operator*() { + assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!"); + return CurrentSCC; + } + + // hasLoop() -- Test if the current SCC has a loop. If it has more than one + // node, this is trivially true. If not, it may still contain a loop if the + // node has an edge back to itself. 
+ bool hasLoop() const { + assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!"); + if (CurrentSCC.size() > 1) return true; + NodeType *N = CurrentSCC.front(); + for (ChildItTy CI = GT::child_begin(N), CE=GT::child_end(N); CI != CE; ++CI) + if (*CI == N) + return true; + return false; + } +}; + + +// Global constructor for the SCC iterator. +template <class T> +scc_iterator<T> scc_begin(T G) { + return scc_iterator<T>::begin(G); +} + +template <class T> +scc_iterator<T> scc_end(T G) { + return scc_iterator<T>::end(G); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h new file mode 100644 index 0000000..964e7e0 --- /dev/null +++ b/include/llvm/ADT/STLExtras.h @@ -0,0 +1,268 @@ +//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains some templates that are useful if you are working with the +// STL at all. +// +// No library is required when using these functions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STLEXTRAS_H +#define LLVM_ADT_STLEXTRAS_H + +#include <cstddef> // for std::size_t +#include <functional> +#include <utility> // for std::pair +#include "llvm/ADT/iterator.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Extra additions to <functional> +//===----------------------------------------------------------------------===// + +template<class Ty> +struct greater_ptr : public std::binary_function<Ty, Ty, bool> { + bool operator()(const Ty* left, const Ty* right) const { + return *right < *left; + } +}; + +// deleter - Very very very simple method that is used to invoke operator +// delete on something. It is used like this: +// +// for_each(V.begin(), B.end(), deleter<Interval>); +// +template <class T> +static inline void deleter(T *Ptr) { + delete Ptr; +} + + + +//===----------------------------------------------------------------------===// +// Extra additions to <iterator> +//===----------------------------------------------------------------------===// + +// mapped_iterator - This is a simple iterator adapter that causes a function to +// be dereferenced whenever operator* is invoked on the iterator. 
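+//
+// A hypothetical sketch (Foo, DerefFun and UseFoo are illustrative names):
+// DerefFun is assumed to be a functor that defines a result_type typedef and
+// whose operator() dereferences a Foo*.
+//
+//   std::vector<Foo*> Ptrs;
+//   std::for_each(map_iterator(Ptrs.begin(), DerefFun()),
+//                 map_iterator(Ptrs.end(),   DerefFun()), UseFoo);
+//
+// Each operator* applies DerefFun to the underlying Foo* element, so UseFoo
+// receives Foo values rather than pointers.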
+// +template <class RootIt, class UnaryFunc> +class mapped_iterator { + RootIt current; + UnaryFunc Fn; +public: + typedef typename std::iterator_traits<RootIt>::iterator_category + iterator_category; + typedef typename std::iterator_traits<RootIt>::difference_type + difference_type; + typedef typename UnaryFunc::result_type value_type; + + typedef void pointer; + //typedef typename UnaryFunc::result_type *pointer; + typedef void reference; // Can't modify value returned by fn + + typedef RootIt iterator_type; + typedef mapped_iterator<RootIt, UnaryFunc> _Self; + + inline const RootIt &getCurrent() const { return current; } + inline const UnaryFunc &getFunc() const { return Fn; } + + inline explicit mapped_iterator(const RootIt &I, UnaryFunc F) + : current(I), Fn(F) {} + inline mapped_iterator(const mapped_iterator &It) + : current(It.current), Fn(It.Fn) {} + + inline value_type operator*() const { // All this work to do this + return Fn(*current); // little change + } + + _Self& operator++() { ++current; return *this; } + _Self& operator--() { --current; return *this; } + _Self operator++(int) { _Self __tmp = *this; ++current; return __tmp; } + _Self operator--(int) { _Self __tmp = *this; --current; return __tmp; } + _Self operator+ (difference_type n) const { + return _Self(current + n, Fn); + } + _Self& operator+= (difference_type n) { current += n; return *this; } + _Self operator- (difference_type n) const { + return _Self(current - n, Fn); + } + _Self& operator-= (difference_type n) { current -= n; return *this; } + reference operator[](difference_type n) const { return *(*this + n); } + + inline bool operator!=(const _Self &X) const { return !operator==(X); } + inline bool operator==(const _Self &X) const { return current == X.current; } + inline bool operator< (const _Self &X) const { return current < X.current; } + + inline difference_type operator-(const _Self &X) const { + return current - X.current; + } +}; + +template <class _Iterator, class Func> +inline mapped_iterator<_Iterator, Func> +operator+(typename mapped_iterator<_Iterator, Func>::difference_type N, + const mapped_iterator<_Iterator, Func>& X) { + return mapped_iterator<_Iterator, Func>(X.getCurrent() - N, X.getFunc()); +} + + +// map_iterator - Provide a convenient way to create mapped_iterators, just like +// make_pair is useful for creating pairs... +// +template <class ItTy, class FuncTy> +inline mapped_iterator<ItTy, FuncTy> map_iterator(const ItTy &I, FuncTy F) { + return mapped_iterator<ItTy, FuncTy>(I, F); +} + + +// next/prior - These functions unlike std::advance do not modify the +// passed iterator but return a copy. 
+// +// next(myIt) returns copy of myIt incremented once +// next(myIt, n) returns copy of myIt incremented n times +// prior(myIt) returns copy of myIt decremented once +// prior(myIt, n) returns copy of myIt decremented n times + +template <typename ItTy, typename Dist> +inline ItTy next(ItTy it, Dist n) +{ + std::advance(it, n); + return it; +} + +template <typename ItTy> +inline ItTy next(ItTy it) +{ + return ++it; +} + +template <typename ItTy, typename Dist> +inline ItTy prior(ItTy it, Dist n) +{ + std::advance(it, -n); + return it; +} + +template <typename ItTy> +inline ItTy prior(ItTy it) +{ + return --it; +} + +//===----------------------------------------------------------------------===// +// Extra additions to <utility> +//===----------------------------------------------------------------------===// + +// tie - this function ties two objects and returns a temporary object +// that is assignable from a std::pair. This can be used to make code +// more readable when using values returned from functions bundled in +// a std::pair. Since an example is worth 1000 words: +// +// typedef std::map<int, int> Int2IntMap; +// +// Int2IntMap myMap; +// Int2IntMap::iterator where; +// bool inserted; +// tie(where, inserted) = myMap.insert(std::make_pair(123,456)); +// +// if (inserted) +// // do stuff +// else +// // do other stuff + +namespace +{ + template <typename T1, typename T2> + struct tier { + typedef T1 &first_type; + typedef T2 &second_type; + + first_type first; + second_type second; + + tier(first_type f, second_type s) : first(f), second(s) { } + tier& operator=(const std::pair<T1, T2>& p) { + first = p.first; + second = p.second; + return *this; + } + }; +} + +template <typename T1, typename T2> +inline tier<T1, T2> tie(T1& f, T2& s) { + return tier<T1, T2>(f, s); +} + +//===----------------------------------------------------------------------===// +// Extra additions for arrays +//===----------------------------------------------------------------------===// + +/// Find where an array ends (for ending iterators) +/// This returns a pointer to the byte immediately +/// after the end of an array. +template<class T, std::size_t N> +inline T *array_endof(T (&x)[N]) { + return x+N; +} + +/// Find the length of an array. +template<class T, std::size_t N> +inline size_t array_lengthof(T (&x)[N]) { + return N; +} + +/// array_pod_sort_comparator - This is helper function for array_pod_sort, +/// which just uses operator< on T. +template<typename T> +static inline int array_pod_sort_comparator(const void *P1, const void *P2) { + if (*reinterpret_cast<const T*>(P1) < *reinterpret_cast<const T*>(P2)) + return -1; + if (*reinterpret_cast<const T*>(P2) < *reinterpret_cast<const T*>(P1)) + return 1; + return 0; +} + +/// get_array_pad_sort_comparator - This is an internal helper function used to +/// get type deduction of T right. +template<typename T> +static int (*get_array_pad_sort_comparator(const T &X)) + (const void*, const void*) { + return array_pod_sort_comparator<T>; +} + + +/// array_pod_sort - This sorts an array with the specified start and end +/// extent. This is just like std::sort, except that it calls qsort instead of +/// using an inlined template. qsort is slightly slower than std::sort, but +/// most sorts are not performance critical in LLVM and std::sort has to be +/// template instantiated for each type, leading to significant measured code +/// bloat. This function should generally be used instead of std::sort where +/// possible. 
+/// +/// This function assumes that you have simple POD-like types that can be +/// compared with operator< and can be moved with memcpy. If this isn't true, +/// you should use std::sort. +/// +/// NOTE: If qsort_r were portable, we could allow a custom comparator and +/// default to std::less. +template<class IteratorTy> +static inline void array_pod_sort(IteratorTy Start, IteratorTy End) { + // Don't dereference start iterator of empty sequence. + if (Start == End) return; + qsort(&*Start, End-Start, sizeof(*Start), + get_array_pad_sort_comparator(*Start)); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/ScopedHashTable.h b/include/llvm/ADT/ScopedHashTable.h new file mode 100644 index 0000000..d538295 --- /dev/null +++ b/include/llvm/ADT/ScopedHashTable.h @@ -0,0 +1,193 @@ +//===- ScopedHashTable.h - A simple scoped hash table ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements an efficient scoped hash table, which is useful for +// things like dominator-based optimizations. This allows clients to do things +// like this: +// +// ScopedHashTable<int, int> HT; +// { +// ScopedHashTableScope<int, int> Scope1(HT); +// HT.insert(0, 0); +// HT.insert(1, 1); +// { +// ScopedHashTableScope<int, int> Scope2(HT); +// HT.insert(0, 42); +// } +// } +// +// Looking up the value for "0" in the Scope2 block will return 42. Looking +// up the value for 0 before 42 is inserted or after Scope2 is popped will +// return 0. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SCOPEDHASHTABLE_H +#define LLVM_ADT_SCOPEDHASHTABLE_H + +#include <cassert> +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + +template <typename K, typename V> +class ScopedHashTable; + +template <typename K, typename V> +class ScopedHashTableVal { + ScopedHashTableVal *NextInScope; + ScopedHashTableVal *NextForKey; + K Key; + V Val; +public: + ScopedHashTableVal(ScopedHashTableVal *nextInScope, + ScopedHashTableVal *nextForKey, const K &key, const V &val) + : NextInScope(nextInScope), NextForKey(nextForKey), Key(key), Val(val) { + } + + const K &getKey() const { return Key; } + const V &getValue() const { return Val; } + V &getValue() { return Val; } + + ScopedHashTableVal *getNextForKey() { return NextForKey; } + const ScopedHashTableVal *getNextForKey() const { return NextForKey; } +public: + ScopedHashTableVal *getNextInScope() { return NextInScope; } +}; + +template <typename K, typename V> +class ScopedHashTableScope { + /// HT - The hashtable that we are active for. + ScopedHashTable<K, V> &HT; + + /// PrevScope - This is the scope that we are shadowing in HT. + ScopedHashTableScope *PrevScope; + + /// LastValInScope - This is the last value that was inserted for this scope + /// or null if none have been inserted yet. 
+ ScopedHashTableVal<K,V> *LastValInScope; + void operator=(ScopedHashTableScope&); // DO NOT IMPLEMENT + ScopedHashTableScope(ScopedHashTableScope&); // DO NOT IMPLEMENT +public: + ScopedHashTableScope(ScopedHashTable<K, V> &HT); + ~ScopedHashTableScope(); + +private: + friend class ScopedHashTable<K, V>; + ScopedHashTableVal<K, V> *getLastValInScope() { return LastValInScope; } + void setLastValInScope(ScopedHashTableVal<K,V> *Val) { LastValInScope = Val; } +}; + + +template <typename K, typename V> +class ScopedHashTableIterator { + ScopedHashTableVal<K,V> *Node; +public: + ScopedHashTableIterator(ScopedHashTableVal<K,V> *node) : Node(node){} + + V &operator*() const { + assert(Node && "Dereference end()"); + return Node->getValue(); + } + V *operator->() const { + return &Node->getValue(); + } + + bool operator==(const ScopedHashTableIterator &RHS) const { + return Node == RHS.Node; + } + bool operator!=(const ScopedHashTableIterator &RHS) const { + return Node != RHS.Node; + } + + inline ScopedHashTableIterator& operator++() { // Preincrement + assert(Node && "incrementing past end()"); + Node = Node->getNextForKey(); + return *this; + } + ScopedHashTableIterator operator++(int) { // Postincrement + ScopedHashTableIterator tmp = *this; ++*this; return tmp; + } +}; + + +template <typename K, typename V> +class ScopedHashTable { + DenseMap<K, ScopedHashTableVal<K,V>*> TopLevelMap; + ScopedHashTableScope<K, V> *CurScope; + ScopedHashTable(const ScopedHashTable&); // NOT YET IMPLEMENTED + void operator=(const ScopedHashTable&); // NOT YET IMPLEMENTED + friend class ScopedHashTableScope<K, V>; +public: + ScopedHashTable() : CurScope(0) {} + ~ScopedHashTable() { + assert(CurScope == 0 && TopLevelMap.empty() && "Scope imbalance!"); + } + + void insert(const K &Key, const V &Val) { + assert(CurScope && "No scope active!"); + + ScopedHashTableVal<K,V> *&KeyEntry = TopLevelMap[Key]; + + KeyEntry = new ScopedHashTableVal<K,V>(CurScope->getLastValInScope(), + KeyEntry, Key, Val); + CurScope->setLastValInScope(KeyEntry); + } + + typedef ScopedHashTableIterator<K, V> iterator; + + iterator end() { return iterator(0); } + + iterator begin(const K &Key) { + typename DenseMap<K, ScopedHashTableVal<K,V>*>::iterator I = + TopLevelMap.find(Key); + if (I == TopLevelMap.end()) return end(); + return iterator(I->second); + } +}; + +/// ScopedHashTableScope ctor - Install this as the current scope for the hash +/// table. +template <typename K, typename V> +ScopedHashTableScope<K, V>::ScopedHashTableScope(ScopedHashTable<K, V> &ht) + : HT(ht) { + PrevScope = HT.CurScope; + HT.CurScope = this; + LastValInScope = 0; +} + +template <typename K, typename V> +ScopedHashTableScope<K, V>::~ScopedHashTableScope() { + assert(HT.CurScope == this && "Scope imbalance!"); + HT.CurScope = PrevScope; + + // Pop and delete all values corresponding to this scope. + while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) { + // Pop this value out of the TopLevelMap. + if (ThisEntry->getNextForKey() == 0) { + assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry && + "Scope imbalance!"); + HT.TopLevelMap.erase(ThisEntry->getKey()); + } else { + ScopedHashTableVal<K,V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()]; + assert(KeyEntry == ThisEntry && "Scope imbalance!"); + KeyEntry = ThisEntry->getNextForKey(); + } + + // Pop this value out of the scope. + LastValInScope = ThisEntry->getNextInScope(); + + // Delete this entry. 
+ delete ThisEntry; + } +} + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/SetOperations.h b/include/llvm/ADT/SetOperations.h new file mode 100644 index 0000000..71f5db3 --- /dev/null +++ b/include/llvm/ADT/SetOperations.h @@ -0,0 +1,71 @@ +//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines generic set operations that may be used on set's of +// different types, and different element types. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SETOPERATIONS_H +#define LLVM_ADT_SETOPERATIONS_H + +namespace llvm { + +/// set_union(A, B) - Compute A := A u B, return whether A changed. +/// +template <class S1Ty, class S2Ty> +bool set_union(S1Ty &S1, const S2Ty &S2) { + bool Changed = false; + + for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end(); + SI != SE; ++SI) + if (S1.insert(*SI).second) + Changed = true; + + return Changed; +} + +/// set_intersect(A, B) - Compute A := A ^ B +/// Identical to set_intersection, except that it works on set<>'s and +/// is nicer to use. Functionally, this iterates through S1, removing +/// elements that are not contained in S2. +/// +template <class S1Ty, class S2Ty> +void set_intersect(S1Ty &S1, const S2Ty &S2) { + for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) { + const typename S1Ty::key_type &E = *I; + ++I; + if (!S2.count(E)) S1.erase(E); // Erase element if not in S2 + } +} + +/// set_difference(A, B) - Return A - B +/// +template <class S1Ty, class S2Ty> +S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) { + S1Ty Result; + for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end(); + SI != SE; ++SI) + if (!S2.count(*SI)) // if the element is not in set2 + Result.insert(*SI); + return Result; +} + +/// set_subtract(A, B) - Compute A := A - B +/// +template <class S1Ty, class S2Ty> +void set_subtract(S1Ty &S1, const S2Ty &S2) { + for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end(); + SI != SE; ++SI) + S1.erase(*SI); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/SetVector.h b/include/llvm/ADT/SetVector.h new file mode 100644 index 0000000..fab133a --- /dev/null +++ b/include/llvm/ADT/SetVector.h @@ -0,0 +1,168 @@ +//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a set that has insertion order iteration +// characteristics. This is useful for keeping a set of things that need to be +// visited later but in a deterministic order (insertion order). The interface +// is purposefully minimal. +// +// This file defines SetVector and SmallSetVector, which performs no allocations +// if the SetVector has less than a certain number of elements. 
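+//
+// A minimal usage sketch (Value* and the element names are illustrative):
+//
+//   SetVector<Value*> Worklist;
+//   Worklist.insert(V1);   // returns true; V1 is appended
+//   Worklist.insert(V1);   // returns false; duplicates are ignored
+//   Worklist.insert(V2);
+//   // Iteration visits V1 then V2, i.e. in insertion order.
+//   for (SetVector<Value*>::iterator I = Worklist.begin(), E = Worklist.end();
+//        I != E; ++I)
+//     process(*I);   // process() is a hypothetical callback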
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SETVECTOR_H +#define LLVM_ADT_SETVECTOR_H + +#include "llvm/ADT/SmallSet.h" +#include <algorithm> +#include <cassert> +#include <vector> + +namespace llvm { + +/// This adapter class provides a way to keep a set of things that also has the +/// property of a deterministic iteration order. The order of iteration is the +/// order of insertion. +/// @brief A vector that has set insertion semantics. +template <typename T, typename Vector = std::vector<T>, + typename Set = SmallSet<T, 16> > +class SetVector { +public: + typedef T value_type; + typedef T key_type; + typedef T& reference; + typedef const T& const_reference; + typedef Set set_type; + typedef Vector vector_type; + typedef typename vector_type::const_iterator iterator; + typedef typename vector_type::const_iterator const_iterator; + typedef typename vector_type::size_type size_type; + + /// @brief Construct an empty SetVector + SetVector() {} + + /// @brief Initialize a SetVector with a range of elements + template<typename It> + SetVector(It Start, It End) { + insert(Start, End); + } + + /// @brief Determine if the SetVector is empty or not. + bool empty() const { + return vector_.empty(); + } + + /// @brief Determine the number of elements in the SetVector. + size_type size() const { + return vector_.size(); + } + + /// @brief Get an iterator to the beginning of the SetVector. + iterator begin() { + return vector_.begin(); + } + + /// @brief Get a const_iterator to the beginning of the SetVector. + const_iterator begin() const { + return vector_.begin(); + } + + /// @brief Get an iterator to the end of the SetVector. + iterator end() { + return vector_.end(); + } + + /// @brief Get a const_iterator to the end of the SetVector. + const_iterator end() const { + return vector_.end(); + } + + /// @brief Return the last element of the SetVector. + const T &back() const { + assert(!empty() && "Cannot call back() on empty SetVector!"); + return vector_.back(); + } + + /// @brief Index into the SetVector. + const_reference operator[](size_type n) const { + assert(n < vector_.size() && "SetVector access out of range!"); + return vector_[n]; + } + + /// @returns true iff the element was inserted into the SetVector. + /// @brief Insert a new element into the SetVector. + bool insert(const value_type &X) { + bool result = set_.insert(X); + if (result) + vector_.push_back(X); + return result; + } + + /// @brief Insert a range of elements into the SetVector. + template<typename It> + void insert(It Start, It End) { + for (; Start != End; ++Start) + if (set_.insert(*Start)) + vector_.push_back(*Start); + } + + /// @brief Remove an item from the set vector. + void remove(const value_type& X) { + if (set_.erase(X)) { + typename vector_type::iterator I = + std::find(vector_.begin(), vector_.end(), X); + assert(I != vector_.end() && "Corrupted SetVector instances!"); + vector_.erase(I); + } + } + + + /// @returns 0 if the element is not in the SetVector, 1 if it is. + /// @brief Count the number of elements of a given key in the SetVector. + size_type count(const key_type &key) const { + return set_.count(key); + } + + /// @brief Completely clear the SetVector + void clear() { + set_.clear(); + vector_.clear(); + } + + /// @brief Remove the last element of the SetVector. 
+ void pop_back() { + assert(!empty() && "Cannot remove an element from an empty SetVector!"); + set_.erase(back()); + vector_.pop_back(); + } + +private: + set_type set_; ///< The set. + vector_type vector_; ///< The vector. +}; + +/// SmallSetVector - A SetVector that performs no allocations if smaller than +/// a certain size. +template <typename T, unsigned N> +class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > { +public: + SmallSetVector() {} + + /// @brief Initialize a SmallSetVector with a range of elements + template<typename It> + SmallSetVector(It Start, It End) { + this->insert(Start, End); + } +}; + +} // End llvm namespace + +// vim: sw=2 ai +#endif diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h new file mode 100644 index 0000000..a189de2 --- /dev/null +++ b/include/llvm/ADT/SmallPtrSet.h @@ -0,0 +1,284 @@ +//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SmallPtrSet class. See the doxygen comment for +// SmallPtrSetImpl for more details on the algorithm used. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SMALLPTRSET_H +#define LLVM_ADT_SMALLPTRSET_H + +#include <cassert> +#include <cstring> +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/PointerLikeTypeTraits.h" + +namespace llvm { + +class SmallPtrSetIteratorImpl; + +/// SmallPtrSetImpl - This is the common code shared among all the +/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one +/// for small and one for large sets. +/// +/// Small sets use an array of pointers allocated in the SmallPtrSet object, +/// which is treated as a simple array of pointers. When a pointer is added to +/// the set, the array is scanned to see if the element already exists, if not +/// the element is 'pushed back' onto the array. If we run out of space in the +/// array, we grow into the 'large set' case. SmallSet should be used when the +/// sets are often small. In this case, no memory allocation is used, and only +/// light-weight and cache-efficient scanning is used. +/// +/// Large sets use a classic exponentially-probed hash table. Empty buckets are +/// represented with an illegal pointer value (-1) to allow null pointers to be +/// inserted. Tombstones are represented with another illegal pointer value +/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or +/// more. When this happens, the table is doubled in size. +/// +class SmallPtrSetImpl { + friend class SmallPtrSetIteratorImpl; +protected: + /// CurArray - This is the current set of buckets. If it points to + /// SmallArray, then the set is in 'small mode'. + const void **CurArray; + /// CurArraySize - The allocated size of CurArray, always a power of two. + /// Note that CurArray points to an array that has CurArraySize+1 elements in + /// it, so that the end iterator actually points to valid memory. + unsigned CurArraySize; + + // If small, this is # elts allocated consequtively + unsigned NumElements; + unsigned NumTombstones; + const void *SmallArray[1]; // Must be last ivar. + + // Helper to copy construct a SmallPtrSet. 
+ SmallPtrSetImpl(const SmallPtrSetImpl& that); + explicit SmallPtrSetImpl(unsigned SmallSize) { + assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 && + "Initial size must be a power of two!"); + CurArray = &SmallArray[0]; + CurArraySize = SmallSize; + // The end pointer, always valid, is set to a valid element to help the + // iterator. + CurArray[SmallSize] = 0; + clear(); + } + ~SmallPtrSetImpl(); + +public: + bool empty() const { return size() == 0; } + unsigned size() const { return NumElements; } + + void clear() { + // If the capacity of the array is huge, and the # elements used is small, + // shrink the array. + if (!isSmall() && NumElements*4 < CurArraySize && CurArraySize > 32) + return shrink_and_clear(); + + // Fill the array with empty markers. + memset(CurArray, -1, CurArraySize*sizeof(void*)); + NumElements = 0; + NumTombstones = 0; + } + +protected: + static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); } + static void *getEmptyMarker() { + // Note that -1 is chosen to make clear() efficiently implementable with + // memset and because it's not a valid pointer value. + return reinterpret_cast<void*>(-1); + } + + /// insert_imp - This returns true if the pointer was new to the set, false if + /// it was already in the set. This is hidden from the client so that the + /// derived class can check that the right type of pointer is passed in. + bool insert_imp(const void * Ptr); + + /// erase_imp - If the set contains the specified pointer, remove it and + /// return true, otherwise return false. This is hidden from the client so + /// that the derived class can check that the right type of pointer is passed + /// in. + bool erase_imp(const void * Ptr); + + bool count_imp(const void * Ptr) const { + if (isSmall()) { + // Linear search for the item. + for (const void *const *APtr = SmallArray, + *const *E = SmallArray+NumElements; APtr != E; ++APtr) + if (*APtr == Ptr) + return true; + return false; + } + + // Big set case. + return *FindBucketFor(Ptr) == Ptr; + } + +private: + bool isSmall() const { return CurArray == &SmallArray[0]; } + + unsigned Hash(const void *Ptr) const { + return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1)); + } + const void * const *FindBucketFor(const void *Ptr) const; + void shrink_and_clear(); + + /// Grow - Allocate a larger backing store for the buckets and move it over. + void Grow(); + + void operator=(const SmallPtrSetImpl &RHS); // DO NOT IMPLEMENT. +protected: + void CopyFrom(const SmallPtrSetImpl &RHS); +}; + +/// SmallPtrSetIteratorImpl - This is the common base class shared between all +/// instances of SmallPtrSetIterator. +class SmallPtrSetIteratorImpl { +protected: + const void *const *Bucket; +public: + explicit SmallPtrSetIteratorImpl(const void *const *BP) : Bucket(BP) { + AdvanceIfNotValid(); + } + + bool operator==(const SmallPtrSetIteratorImpl &RHS) const { + return Bucket == RHS.Bucket; + } + bool operator!=(const SmallPtrSetIteratorImpl &RHS) const { + return Bucket != RHS.Bucket; + } + +protected: + /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket + /// that is. This is guaranteed to stop because the end() bucket is marked + /// valid. + void AdvanceIfNotValid() { + while (*Bucket == SmallPtrSetImpl::getEmptyMarker() || + *Bucket == SmallPtrSetImpl::getTombstoneMarker()) + ++Bucket; + } +}; + +/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet. 
+template<typename PtrTy> +class SmallPtrSetIterator : public SmallPtrSetIteratorImpl { + typedef PointerLikeTypeTraits<PtrTy> PtrTraits; +public: + explicit SmallPtrSetIterator(const void *const *BP) + : SmallPtrSetIteratorImpl(BP) {} + + // Most methods provided by baseclass. + + const PtrTy operator*() const { + return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket)); + } + + inline SmallPtrSetIterator& operator++() { // Preincrement + ++Bucket; + AdvanceIfNotValid(); + return *this; + } + + SmallPtrSetIterator operator++(int) { // Postincrement + SmallPtrSetIterator tmp = *this; ++*this; return tmp; + } +}; + +/// NextPowerOfTwo - This is a helper template that rounds N up to the next +/// power of two. +template<unsigned N> +struct NextPowerOfTwo; + +/// NextPowerOfTwoH - If N is not a power of two, increase it. This is a helper +/// template used to implement NextPowerOfTwo. +template<unsigned N, bool isPowerTwo> +struct NextPowerOfTwoH { + enum { Val = N }; +}; +template<unsigned N> +struct NextPowerOfTwoH<N, false> { + enum { + // We could just use NextVal = N+1, but this converges faster. N|(N-1) sets + // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111. + Val = NextPowerOfTwo<(N|(N-1)) + 1>::Val + }; +}; + +template<unsigned N> +struct NextPowerOfTwo { + enum { Val = NextPowerOfTwoH<N, (N&(N-1)) == 0>::Val }; +}; + + +/// SmallPtrSet - This class implements a set which is optimizer for holding +/// SmallSize or less elements. This internally rounds up SmallSize to the next +/// power of two if it is not already a power of two. See the comments above +/// SmallPtrSetImpl for details of the algorithm. +template<class PtrType, unsigned SmallSize> +class SmallPtrSet : public SmallPtrSetImpl { + // Make sure that SmallSize is a power of two, round up if not. + enum { SmallSizePowTwo = NextPowerOfTwo<SmallSize>::Val }; + void *SmallArray[SmallSizePowTwo]; + typedef PointerLikeTypeTraits<PtrType> PtrTraits; +public: + SmallPtrSet() : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {} + SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(that) {} + + template<typename It> + SmallPtrSet(It I, It E) + : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) { + insert(I, E); + } + + /// insert - This returns true if the pointer was new to the set, false if it + /// was already in the set. + bool insert(PtrType Ptr) { + return insert_imp(PtrTraits::getAsVoidPointer(Ptr)); + } + + /// erase - If the set contains the specified pointer, remove it and return + /// true, otherwise return false. + bool erase(PtrType Ptr) { + return erase_imp(PtrTraits::getAsVoidPointer(Ptr)); + } + + /// count - Return true if the specified pointer is in the set. + bool count(PtrType Ptr) const { + return count_imp(PtrTraits::getAsVoidPointer(Ptr)); + } + + template <typename IterT> + void insert(IterT I, IterT E) { + for (; I != E; ++I) + insert(*I); + } + + typedef SmallPtrSetIterator<PtrType> iterator; + typedef SmallPtrSetIterator<PtrType> const_iterator; + inline iterator begin() const { + return iterator(CurArray); + } + inline iterator end() const { + return iterator(CurArray+CurArraySize); + } + + // Allow assignment from any smallptrset with the same element type even if it + // doesn't have the same smallsize. 
+ const SmallPtrSet<PtrType, SmallSize>& + operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) { + CopyFrom(RHS); + return *this; + } + +}; + +} + +#endif diff --git a/include/llvm/ADT/SmallSet.h b/include/llvm/ADT/SmallSet.h new file mode 100644 index 0000000..caaa96c --- /dev/null +++ b/include/llvm/ADT/SmallSet.h @@ -0,0 +1,118 @@ +//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SmallSet class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SMALLSET_H +#define LLVM_ADT_SMALLSET_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/SmallPtrSet.h" +#include <set> + +namespace llvm { + +/// SmallSet - This maintains a set of unique values, optimizing for the case +/// when the set is small (less than N). In this case, the set can be +/// maintained with no mallocs. If the set gets large, we expand to using an +/// std::set to maintain reasonable lookup times. +/// +/// Note that this set does not provide a way to iterate over members in the +/// set. +template <typename T, unsigned N> +class SmallSet { + /// Use a SmallVector to hold the elements here (even though it will never + /// reach it's 'large' stage) to avoid calling the default ctors of elements + /// we will never use. + SmallVector<T, N> Vector; + std::set<T> Set; + typedef typename SmallVector<T, N>::const_iterator VIterator; + typedef typename SmallVector<T, N>::iterator mutable_iterator; +public: + SmallSet() {} + + bool empty() const { return Vector.empty() && Set.empty(); } + unsigned size() const { + return isSmall() ? Vector.size() : Set.size(); + } + + /// count - Return true if the element is in the set. + bool count(const T &V) const { + if (isSmall()) { + // Since the collection is small, just do a linear search. + return vfind(V) != Vector.end(); + } else { + return Set.count(V); + } + } + + /// insert - Insert an element into the set if it isn't already there. + bool insert(const T &V) { + if (!isSmall()) + return Set.insert(V).second; + + VIterator I = vfind(V); + if (I != Vector.end()) // Don't reinsert if it already exists. + return false; + if (Vector.size() < N) { + Vector.push_back(V); + return true; + } + + // Otherwise, grow from vector to set. + while (!Vector.empty()) { + Set.insert(Vector.back()); + Vector.pop_back(); + } + Set.insert(V); + return true; + } + + template <typename IterT> + void insert(IterT I, IterT E) { + for (; I != E; ++I) + insert(*I); + } + + bool erase(const T &V) { + if (!isSmall()) + return Set.erase(V); + for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I) + if (*I == V) { + Vector.erase(I); + return true; + } + return false; + } + + void clear() { + Vector.clear(); + Set.clear(); + } +private: + bool isSmall() const { return Set.empty(); } + + VIterator vfind(const T &V) const { + for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I) + if (*I == V) + return I; + return Vector.end(); + } +}; + +/// If this set is of pointer values, transparently switch over to using +/// SmallPtrSet for performance. 
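+/// For example, SmallSet<Instruction*, 8> resolves to this specialization and
+/// therefore behaves like (and derives from) SmallPtrSet<Instruction*, 8>.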
+template <typename PointeeType, unsigned N> +class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/SmallString.h b/include/llvm/ADT/SmallString.h new file mode 100644 index 0000000..687fa2d --- /dev/null +++ b/include/llvm/ADT/SmallString.h @@ -0,0 +1,109 @@ +//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SmallString class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SMALLSTRING_H +#define LLVM_ADT_SMALLSTRING_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/DataTypes.h" +#include <cstring> + +namespace llvm { + +/// SmallString - A SmallString is just a SmallVector with methods and accessors +/// that make it work better as a string (e.g. operator+ etc). +template<unsigned InternalLen> +class SmallString : public SmallVector<char, InternalLen> { +public: + // Default ctor - Initialize to empty. + SmallString() {} + + // Initialize with a range. + template<typename ItTy> + SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {} + + // Copy ctor. + SmallString(const SmallString &RHS) : SmallVector<char, InternalLen>(RHS) {} + + + // Extra methods. + const char *c_str() const { + SmallString *This = const_cast<SmallString*>(this); + // Ensure that there is a \0 at the end of the string. + This->reserve(this->size()+1); + This->End[0] = 0; + return this->begin(); + } + + // Extra operators. + const SmallString &operator=(const char *RHS) { + this->clear(); + return *this += RHS; + } + + SmallString &operator+=(const char *RHS) { + this->append(RHS, RHS+strlen(RHS)); + return *this; + } + SmallString &operator+=(char C) { + this->push_back(C); + return *this; + } + + SmallString &append_uint_32(uint32_t N) { + char Buffer[20]; + char *BufPtr = Buffer+20; + + if (N == 0) *--BufPtr = '0'; // Handle special case. + + while (N) { + *--BufPtr = '0' + char(N % 10); + N /= 10; + } + this->append(BufPtr, Buffer+20); + return *this; + } + + SmallString &append_uint(uint64_t N) { + if (N == uint32_t(N)) + return append_uint_32(uint32_t(N)); + + char Buffer[40]; + char *BufPtr = Buffer+40; + + if (N == 0) *--BufPtr = '0'; // Handle special case... + + while (N) { + *--BufPtr = '0' + char(N % 10); + N /= 10; + } + + this->append(BufPtr, Buffer+40); + return *this; + } + + SmallString &append_sint(int64_t N) { + // TODO, wrong for minint64. + if (N < 0) { + this->push_back('-'); + N = -N; + } + return append_uint(N); + } + +}; + + +} + +#endif diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h new file mode 100644 index 0000000..f59a438 --- /dev/null +++ b/include/llvm/ADT/SmallVector.h @@ -0,0 +1,617 @@ +//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SmallVector class. 
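+//
+// A minimal usage sketch: the second template argument is the number of
+// elements stored inline before the vector falls back to heap allocation.
+// (Instruction* and the names below are illustrative.)
+//
+//   SmallVector<Instruction*, 8> Worklist;   // no malloc for <= 8 elements
+//   Worklist.push_back(I1);
+//   Worklist.push_back(I2);
+//   while (!Worklist.empty()) {
+//     Instruction *I = Worklist.pop_back_val();
+//     // ... process I ...
+//   }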
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SMALLVECTOR_H +#define LLVM_ADT_SMALLVECTOR_H + +#include "llvm/ADT/iterator.h" +#include "llvm/Support/type_traits.h" +#include <algorithm> +#include <cassert> +#include <cstring> +#include <memory> + +#ifdef _MSC_VER +namespace std { +#if _MSC_VER <= 1310 + // Work around flawed VC++ implementation of std::uninitialized_copy. Define + // additional overloads so that elements with pointer types are recognized as + // scalars and not objects, causing bizarre type conversion errors. + template<class T1, class T2> + inline _Scalar_ptr_iterator_tag _Ptr_cat(T1 **, T2 **) { + _Scalar_ptr_iterator_tag _Cat; + return _Cat; + } + + template<class T1, class T2> + inline _Scalar_ptr_iterator_tag _Ptr_cat(T1* const *, T2 **) { + _Scalar_ptr_iterator_tag _Cat; + return _Cat; + } +#else +// FIXME: It is not clear if the problem is fixed in VS 2005. What is clear +// is that the above hack won't work if it wasn't fixed. +#endif +} +#endif + +namespace llvm { + +/// SmallVectorImpl - This class consists of common code factored out of the +/// SmallVector class to reduce code duplication based on the SmallVector 'N' +/// template parameter. +template <typename T> +class SmallVectorImpl { +protected: + T *Begin, *End, *Capacity; + + // Allocate raw space for N elements of type T. If T has a ctor or dtor, we + // don't want it to be automatically run, so we need to represent the space as + // something else. An array of char would work great, but might not be + // aligned sufficiently. Instead, we either use GCC extensions, or some + // number of union instances for the space, which guarantee maximal alignment. +protected: +#ifdef __GNUC__ + typedef char U; + U FirstEl __attribute__((aligned)); +#else + union U { + double D; + long double LD; + long long L; + void *P; + } FirstEl; +#endif + // Space after 'FirstEl' is clobbered, do not add any instance vars after it. +public: + // Default ctor - Initialize to empty. + explicit SmallVectorImpl(unsigned N) + : Begin(reinterpret_cast<T*>(&FirstEl)), + End(reinterpret_cast<T*>(&FirstEl)), + Capacity(reinterpret_cast<T*>(&FirstEl)+N) { + } + + ~SmallVectorImpl() { + // Destroy the constructed elements in the vector. + destroy_range(Begin, End); + + // If this wasn't grown from the inline copy, deallocate the old space. + if (!isSmall()) + operator delete(Begin); + } + + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T* iterator; + typedef const T* const_iterator; + + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + + bool empty() const { return Begin == End; } + size_type size() const { return End-Begin; } + size_type max_size() const { return size_type(-1) / sizeof(T); } + + // forward iterator creation methods. + iterator begin() { return Begin; } + const_iterator begin() const { return Begin; } + iterator end() { return End; } + const_iterator end() const { return End; } + + // reverse iterator creation methods. 
+ reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin());} + + + reference operator[](unsigned idx) { + assert (Begin + idx < End); + return Begin[idx]; + } + const_reference operator[](unsigned idx) const { + assert (Begin + idx < End); + return Begin[idx]; + } + + reference front() { + return begin()[0]; + } + const_reference front() const { + return begin()[0]; + } + + reference back() { + return end()[-1]; + } + const_reference back() const { + return end()[-1]; + } + + void push_back(const_reference Elt) { + if (End < Capacity) { + Retry: + new (End) T(Elt); + ++End; + return; + } + grow(); + goto Retry; + } + + void pop_back() { + --End; + End->~T(); + } + + T pop_back_val() { + T Result = back(); + pop_back(); + return Result; + } + + void clear() { + destroy_range(Begin, End); + End = Begin; + } + + void resize(unsigned N) { + if (N < size()) { + destroy_range(Begin+N, End); + End = Begin+N; + } else if (N > size()) { + if (unsigned(Capacity-Begin) < N) + grow(N); + construct_range(End, Begin+N, T()); + End = Begin+N; + } + } + + void resize(unsigned N, const T &NV) { + if (N < size()) { + destroy_range(Begin+N, End); + End = Begin+N; + } else if (N > size()) { + if (unsigned(Capacity-Begin) < N) + grow(N); + construct_range(End, Begin+N, NV); + End = Begin+N; + } + } + + void reserve(unsigned N) { + if (unsigned(Capacity-Begin) < N) + grow(N); + } + + void swap(SmallVectorImpl &RHS); + + /// append - Add the specified range to the end of the SmallVector. + /// + template<typename in_iter> + void append(in_iter in_start, in_iter in_end) { + size_type NumInputs = std::distance(in_start, in_end); + // Grow allocated space if needed. + if (NumInputs > size_type(Capacity-End)) + grow(size()+NumInputs); + + // Copy the new elements over. + std::uninitialized_copy(in_start, in_end, End); + End += NumInputs; + } + + /// append - Add the specified range to the end of the SmallVector. + /// + void append(size_type NumInputs, const T &Elt) { + // Grow allocated space if needed. + if (NumInputs > size_type(Capacity-End)) + grow(size()+NumInputs); + + // Copy the new elements over. + std::uninitialized_fill_n(End, NumInputs, Elt); + End += NumInputs; + } + + void assign(unsigned NumElts, const T &Elt) { + clear(); + if (unsigned(Capacity-Begin) < NumElts) + grow(NumElts); + End = Begin+NumElts; + construct_range(Begin, End, Elt); + } + + iterator erase(iterator I) { + iterator N = I; + // Shift all elts down one. + std::copy(I+1, End, I); + // Drop the last elt. + pop_back(); + return(N); + } + + iterator erase(iterator S, iterator E) { + iterator N = S; + // Shift all elts down. + iterator I = std::copy(E, End, S); + // Drop the last elts. + destroy_range(I, End); + End = I; + return(N); + } + + iterator insert(iterator I, const T &Elt) { + if (I == End) { // Important special case for empty vector. + push_back(Elt); + return end()-1; + } + + if (End < Capacity) { + Retry: + new (End) T(back()); + ++End; + // Push everything else over. + std::copy_backward(I, End-1, End); + *I = Elt; + return I; + } + size_t EltNo = I-Begin; + grow(); + I = Begin+EltNo; + goto Retry; + } + + iterator insert(iterator I, size_type NumToInsert, const T &Elt) { + if (I == End) { // Important special case for empty vector. 
+ append(NumToInsert, Elt); + return end()-1; + } + + // Convert iterator to elt# to avoid invalidating iterator when we reserve() + size_t InsertElt = I-begin(); + + // Ensure there is enough space. + reserve(static_cast<unsigned>(size() + NumToInsert)); + + // Uninvalidate the iterator. + I = begin()+InsertElt; + + // If there are more elements between the insertion point and the end of the + // range than there are being inserted, we can use a simple approach to + // insertion. Since we already reserved space, we know that this won't + // reallocate the vector. + if (size_t(end()-I) >= NumToInsert) { + T *OldEnd = End; + append(End-NumToInsert, End); + + // Copy the existing elements that get replaced. + std::copy_backward(I, OldEnd-NumToInsert, OldEnd); + + std::fill_n(I, NumToInsert, Elt); + return I; + } + + // Otherwise, we're inserting more elements than exist already, and we're + // not inserting at the end. + + // Copy over the elements that we're about to overwrite. + T *OldEnd = End; + End += NumToInsert; + size_t NumOverwritten = OldEnd-I; + std::uninitialized_copy(I, OldEnd, End-NumOverwritten); + + // Replace the overwritten part. + std::fill_n(I, NumOverwritten, Elt); + + // Insert the non-overwritten middle part. + std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt); + return I; + } + + template<typename ItTy> + iterator insert(iterator I, ItTy From, ItTy To) { + if (I == End) { // Important special case for empty vector. + append(From, To); + return end()-1; + } + + size_t NumToInsert = std::distance(From, To); + // Convert iterator to elt# to avoid invalidating iterator when we reserve() + size_t InsertElt = I-begin(); + + // Ensure there is enough space. + reserve(static_cast<unsigned>(size() + NumToInsert)); + + // Uninvalidate the iterator. + I = begin()+InsertElt; + + // If there are more elements between the insertion point and the end of the + // range than there are being inserted, we can use a simple approach to + // insertion. Since we already reserved space, we know that this won't + // reallocate the vector. + if (size_t(end()-I) >= NumToInsert) { + T *OldEnd = End; + append(End-NumToInsert, End); + + // Copy the existing elements that get replaced. + std::copy_backward(I, OldEnd-NumToInsert, OldEnd); + + std::copy(From, To, I); + return I; + } + + // Otherwise, we're inserting more elements than exist already, and we're + // not inserting at the end. + + // Copy over the elements that we're about to overwrite. + T *OldEnd = End; + End += NumToInsert; + size_t NumOverwritten = OldEnd-I; + std::uninitialized_copy(I, OldEnd, End-NumOverwritten); + + // Replace the overwritten part. + std::copy(From, From+NumOverwritten, I); + + // Insert the non-overwritten middle part. + std::uninitialized_copy(From+NumOverwritten, To, OldEnd); + return I; + } + + /// data - Return a pointer to the vector's buffer, even if empty(). + pointer data() { + return pointer(Begin); + } + + /// data - Return a pointer to the vector's buffer, even if empty(). 
+ const_pointer data() const { + return const_pointer(Begin); + } + + const SmallVectorImpl &operator=(const SmallVectorImpl &RHS); + + bool operator==(const SmallVectorImpl &RHS) const { + if (size() != RHS.size()) return false; + for (T *This = Begin, *That = RHS.Begin, *E = Begin+size(); + This != E; ++This, ++That) + if (*This != *That) + return false; + return true; + } + bool operator!=(const SmallVectorImpl &RHS) const { return !(*this == RHS); } + + bool operator<(const SmallVectorImpl &RHS) const { + return std::lexicographical_compare(begin(), end(), + RHS.begin(), RHS.end()); + } + +private: + /// isSmall - Return true if this is a smallvector which has not had dynamic + /// memory allocated for it. + bool isSmall() const { + return static_cast<const void*>(Begin) == + static_cast<const void*>(&FirstEl); + } + + /// grow - double the size of the allocated memory, guaranteeing space for at + /// least one more element or MinSize if specified. + void grow(size_type MinSize = 0); + + void construct_range(T *S, T *E, const T &Elt) { + for (; S != E; ++S) + new (S) T(Elt); + } + + void destroy_range(T *S, T *E) { + while (S != E) { + --E; + E->~T(); + } + } +}; + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template <typename T> +void SmallVectorImpl<T>::grow(size_t MinSize) { + size_t CurCapacity = Capacity-Begin; + size_t CurSize = size(); + size_t NewCapacity = 2*CurCapacity; + if (NewCapacity < MinSize) + NewCapacity = MinSize; + T *NewElts = static_cast<T*>(operator new(NewCapacity*sizeof(T))); + + // Copy the elements over. + if (is_class<T>::value) + std::uninitialized_copy(Begin, End, NewElts); + else + // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove). + memcpy(NewElts, Begin, CurSize * sizeof(T)); + + // Destroy the original elements. + destroy_range(Begin, End); + + // If this wasn't grown from the inline copy, deallocate the old space. + if (!isSmall()) + operator delete(Begin); + + Begin = NewElts; + End = NewElts+CurSize; + Capacity = Begin+NewCapacity; +} + +template <typename T> +void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) { + if (this == &RHS) return; + + // We can only avoid copying elements if neither vector is small. + if (!isSmall() && !RHS.isSmall()) { + std::swap(Begin, RHS.Begin); + std::swap(End, RHS.End); + std::swap(Capacity, RHS.Capacity); + return; + } + if (RHS.size() > size_type(Capacity-Begin)) + grow(RHS.size()); + if (size() > size_type(RHS.Capacity-RHS.begin())) + RHS.grow(size()); + + // Swap the shared elements. + size_t NumShared = size(); + if (NumShared > RHS.size()) NumShared = RHS.size(); + for (unsigned i = 0; i != static_cast<unsigned>(NumShared); ++i) + std::swap(Begin[i], RHS[i]); + + // Copy over the extra elts. + if (size() > RHS.size()) { + size_t EltDiff = size() - RHS.size(); + std::uninitialized_copy(Begin+NumShared, End, RHS.End); + RHS.End += EltDiff; + destroy_range(Begin+NumShared, End); + End = Begin+NumShared; + } else if (RHS.size() > size()) { + size_t EltDiff = RHS.size() - size(); + std::uninitialized_copy(RHS.Begin+NumShared, RHS.End, End); + End += EltDiff; + destroy_range(RHS.Begin+NumShared, RHS.End); + RHS.End = RHS.Begin+NumShared; + } +} + +template <typename T> +const SmallVectorImpl<T> & +SmallVectorImpl<T>::operator=(const SmallVectorImpl<T> &RHS) { + // Avoid self-assignment. + if (this == &RHS) return *this; + + // If we already have sufficient space, assign the common elements, then + // destroy any excess. 
+ unsigned RHSSize = unsigned(RHS.size()); + unsigned CurSize = unsigned(size()); + if (CurSize >= RHSSize) { + // Assign common elements. + iterator NewEnd; + if (RHSSize) + NewEnd = std::copy(RHS.Begin, RHS.Begin+RHSSize, Begin); + else + NewEnd = Begin; + + // Destroy excess elements. + destroy_range(NewEnd, End); + + // Trim. + End = NewEnd; + return *this; + } + + // If we have to grow to have enough elements, destroy the current elements. + // This allows us to avoid copying them during the grow. + if (unsigned(Capacity-Begin) < RHSSize) { + // Destroy current elements. + destroy_range(Begin, End); + End = Begin; + CurSize = 0; + grow(RHSSize); + } else if (CurSize) { + // Otherwise, use assignment for the already-constructed elements. + std::copy(RHS.Begin, RHS.Begin+CurSize, Begin); + } + + // Copy construct the new elements in place. + std::uninitialized_copy(RHS.Begin+CurSize, RHS.End, Begin+CurSize); + + // Set end. + End = Begin+RHSSize; + return *this; +} + +/// SmallVector - This is a 'vector' (really, a variable-sized array), optimized +/// for the case when the array is small. It contains some number of elements +/// in-place, which allows it to avoid heap allocation when the actual number of +/// elements is below that threshold. This allows normal "small" cases to be +/// fast without losing generality for large inputs. +/// +/// Note that this does not attempt to be exception safe. +/// +template <typename T, unsigned N> +class SmallVector : public SmallVectorImpl<T> { + /// InlineElts - These are 'N-1' elements that are stored inline in the body + /// of the vector. The extra '1' element is stored in SmallVectorImpl. + typedef typename SmallVectorImpl<T>::U U; + enum { + // MinUs - The number of U's require to cover N T's. + MinUs = (static_cast<unsigned int>(sizeof(T))*N + + static_cast<unsigned int>(sizeof(U)) - 1) / + static_cast<unsigned int>(sizeof(U)), + + // NumInlineEltsElts - The number of elements actually in this array. There + // is already one in the parent class, and we have to round up to avoid + // having a zero-element array. + NumInlineEltsElts = MinUs > 1 ? (MinUs - 1) : 1, + + // NumTsAvailable - The number of T's we actually have space for, which may + // be more than N due to rounding. + NumTsAvailable = (NumInlineEltsElts+1)*static_cast<unsigned int>(sizeof(U))/ + static_cast<unsigned int>(sizeof(T)) + }; + U InlineElts[NumInlineEltsElts]; +public: + SmallVector() : SmallVectorImpl<T>(NumTsAvailable) { + } + + explicit SmallVector(unsigned Size, const T &Value = T()) + : SmallVectorImpl<T>(NumTsAvailable) { + this->reserve(Size); + while (Size--) + this->push_back(Value); + } + + template<typename ItTy> + SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(NumTsAvailable) { + this->append(S, E); + } + + SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(NumTsAvailable) { + if (!RHS.empty()) + SmallVectorImpl<T>::operator=(RHS); + } + + const SmallVector &operator=(const SmallVector &RHS) { + SmallVectorImpl<T>::operator=(RHS); + return *this; + } + +}; + +} // End llvm namespace + +namespace std { + /// Implement std::swap in terms of SmallVector swap. + template<typename T> + inline void + swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) { + LHS.swap(RHS); + } + + /// Implement std::swap in terms of SmallVector swap. 
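(Editor's aside: a hedged usage sketch for the SmallVector defined above. It relies only on members declared in this header; the function name and values are illustrative assumptions.)

#include "llvm/ADT/SmallVector.h"

int demoSmallVector() {
  llvm::SmallVector<int, 8> V;     // stays in inline storage until it holds more than 8 elements
  for (int i = 0; i != 4; ++i)
    V.push_back(i);                // V = {0, 1, 2, 3}
  V.insert(V.begin(), -1);         // single-element insert; V = {-1, 0, 1, 2, 3}
  int Last = V.pop_back_val();     // copies out and removes the last element (3)
  int Sum = Last;
  for (llvm::SmallVector<int, 8>::iterator I = V.begin(), E = V.end(); I != E; ++I)
    Sum += *I;                     // -1 + 0 + 1 + 2
  return Sum;                      // 5
}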
+ template<typename T, unsigned N> + inline void + swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) { + LHS.swap(RHS); + } +} + +#endif diff --git a/include/llvm/ADT/SparseBitVector.h b/include/llvm/ADT/SparseBitVector.h new file mode 100644 index 0000000..6230135 --- /dev/null +++ b/include/llvm/ADT/SparseBitVector.h @@ -0,0 +1,901 @@ +//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector -*- C++ -*- ===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SparseBitVector class. See the doxygen comment for +// SparseBitVector for more details on the algorithm used. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SPARSEBITVECTOR_H +#define LLVM_ADT_SPARSEBITVECTOR_H + +#include <cassert> +#include <climits> +#include <cstring> +#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/ADT/ilist.h" + +namespace llvm { + +/// SparseBitVector is an implementation of a bitvector that is sparse by only +/// storing the elements that have non-zero bits set. In order to make this +/// fast for the most common cases, SparseBitVector is implemented as a linked +/// list of SparseBitVectorElements. We maintain a pointer to the last +/// SparseBitVectorElement accessed (in the form of a list iterator), in order +/// to make multiple in-order test/set constant time after the first one is +/// executed. Note that using vectors to store SparseBitVectorElement's does +/// not work out very well because it causes insertion in the middle to take +/// enormous amounts of time with a large amount of bits. Other structures that +/// have better worst cases for insertion in the middle (various balanced trees, +/// etc) do not perform as well in practice as a linked list with this iterator +/// kept up to date. They are also significantly more memory intensive. + + +template <unsigned ElementSize = 128> +struct SparseBitVectorElement + : ilist_node<SparseBitVectorElement<ElementSize> > { +public: + typedef unsigned long BitWord; + enum { + BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT, + BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE, + BITS_PER_ELEMENT = ElementSize + }; + +private: + // Index of Element in terms of where first bit starts. + unsigned ElementIndex; + BitWord Bits[BITWORDS_PER_ELEMENT]; + // Needed for sentinels + friend struct ilist_sentinel_traits<SparseBitVectorElement>; + SparseBitVectorElement() { + ElementIndex = ~0U; + memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT); + } + +public: + explicit SparseBitVectorElement(unsigned Idx) { + ElementIndex = Idx; + memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT); + } + + // Comparison. + bool operator==(const SparseBitVectorElement &RHS) const { + if (ElementIndex != RHS.ElementIndex) + return false; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) + if (Bits[i] != RHS.Bits[i]) + return false; + return true; + } + + bool operator!=(const SparseBitVectorElement &RHS) const { + return !(*this == RHS); + } + + // Return the bits that make up word Idx in our element. 
+ BitWord word(unsigned Idx) const { + assert (Idx < BITWORDS_PER_ELEMENT); + return Bits[Idx]; + } + + unsigned index() const { + return ElementIndex; + } + + bool empty() const { + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) + if (Bits[i]) + return false; + return true; + } + + void set(unsigned Idx) { + Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE); + } + + bool test_and_set (unsigned Idx) { + bool old = test(Idx); + if (!old) { + set(Idx); + return true; + } + return false; + } + + void reset(unsigned Idx) { + Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE)); + } + + bool test(unsigned Idx) const { + return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE)); + } + + unsigned count() const { + unsigned NumBits = 0; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) + if (sizeof(BitWord) == 4) + NumBits += CountPopulation_32(Bits[i]); + else if (sizeof(BitWord) == 8) + NumBits += CountPopulation_64(Bits[i]); + else + assert(0 && "Unsupported!"); + return NumBits; + } + + /// find_first - Returns the index of the first set bit. + int find_first() const { + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) + if (Bits[i] != 0) { + if (sizeof(BitWord) == 4) + return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]); + else if (sizeof(BitWord) == 8) + return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); + else + assert(0 && "Unsupported!"); + } + assert(0 && "Illegal empty element"); + return 0; // Not reached + } + + /// find_next - Returns the index of the next set bit starting from the + /// "Curr" bit. Returns -1 if the next set bit is not found. + int find_next(unsigned Curr) const { + if (Curr >= BITS_PER_ELEMENT) + return -1; + + unsigned WordPos = Curr / BITWORD_SIZE; + unsigned BitPos = Curr % BITWORD_SIZE; + BitWord Copy = Bits[WordPos]; + assert (WordPos <= BITWORDS_PER_ELEMENT + && "Word Position outside of element"); + + // Mask off previous bits. + Copy &= ~0L << BitPos; + + if (Copy != 0) { + if (sizeof(BitWord) == 4) + return WordPos * BITWORD_SIZE + CountTrailingZeros_32(Copy); + else if (sizeof(BitWord) == 8) + return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy); + else + assert(0 && "Unsupported!"); + } + + // Check subsequent words. + for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i) + if (Bits[i] != 0) { + if (sizeof(BitWord) == 4) + return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]); + else if (sizeof(BitWord) == 8) + return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); + else + assert(0 && "Unsupported!"); + } + return -1; + } + + // Union this element with RHS and return true if this one changed. + bool unionWith(const SparseBitVectorElement &RHS) { + bool changed = false; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) { + BitWord old = changed ? 0 : Bits[i]; + + Bits[i] |= RHS.Bits[i]; + if (!changed && old != Bits[i]) + changed = true; + } + return changed; + } + + // Return true if we have any bits in common with RHS + bool intersects(const SparseBitVectorElement &RHS) const { + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) { + if (RHS.Bits[i] & Bits[i]) + return true; + } + return false; + } + + // Intersect this Element with RHS and return true if this one changed. + // BecameZero is set to true if this element became all-zero bits. + bool intersectWith(const SparseBitVectorElement &RHS, + bool &BecameZero) { + bool changed = false; + bool allzero = true; + + BecameZero = false; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) { + BitWord old = changed ? 
0 : Bits[i]; + + Bits[i] &= RHS.Bits[i]; + if (Bits[i] != 0) + allzero = false; + + if (!changed && old != Bits[i]) + changed = true; + } + BecameZero = allzero; + return changed; + } + // Intersect this Element with the complement of RHS and return true if this + // one changed. BecameZero is set to true if this element became all-zero + // bits. + bool intersectWithComplement(const SparseBitVectorElement &RHS, + bool &BecameZero) { + bool changed = false; + bool allzero = true; + + BecameZero = false; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) { + BitWord old = changed ? 0 : Bits[i]; + + Bits[i] &= ~RHS.Bits[i]; + if (Bits[i] != 0) + allzero = false; + + if (!changed && old != Bits[i]) + changed = true; + } + BecameZero = allzero; + return changed; + } + // Three argument version of intersectWithComplement that intersects + // RHS1 & ~RHS2 into this element + void intersectWithComplement(const SparseBitVectorElement &RHS1, + const SparseBitVectorElement &RHS2, + bool &BecameZero) { + bool allzero = true; + + BecameZero = false; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) { + Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i]; + if (Bits[i] != 0) + allzero = false; + } + BecameZero = allzero; + } + + // Get a hash value for this element; + uint64_t getHashValue() const { + uint64_t HashVal = 0; + for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) { + HashVal ^= Bits[i]; + } + return HashVal; + } +}; + +template <unsigned ElementSize = 128> +class SparseBitVector { + typedef ilist<SparseBitVectorElement<ElementSize> > ElementList; + typedef typename ElementList::iterator ElementListIter; + typedef typename ElementList::const_iterator ElementListConstIter; + enum { + BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE + }; + + // Pointer to our current Element. + ElementListIter CurrElementIter; + ElementList Elements; + + // This is like std::lower_bound, except we do linear searching from the + // current position. + ElementListIter FindLowerBound(unsigned ElementIndex) { + + if (Elements.empty()) { + CurrElementIter = Elements.begin(); + return Elements.begin(); + } + + // Make sure our current iterator is valid. + if (CurrElementIter == Elements.end()) + --CurrElementIter; + + // Search from our current iterator, either backwards or forwards, + // depending on what element we are looking for. + ElementListIter ElementIter = CurrElementIter; + if (CurrElementIter->index() == ElementIndex) { + return ElementIter; + } else if (CurrElementIter->index() > ElementIndex) { + while (ElementIter != Elements.begin() + && ElementIter->index() > ElementIndex) + --ElementIter; + } else { + while (ElementIter != Elements.end() && + ElementIter->index() < ElementIndex) + ++ElementIter; + } + CurrElementIter = ElementIter; + return ElementIter; + } + + // Iterator to walk set bits in the bitmap. This iterator is a lot uglier + // than it would be, in order to be efficient. + class SparseBitVectorIterator { + private: + bool AtEnd; + + const SparseBitVector<ElementSize> *BitVector; + + // Current element inside of bitmap. + ElementListConstIter Iter; + + // Current bit number inside of our bitmap. + unsigned BitNumber; + + // Current word number inside of our element. + unsigned WordNumber; + + // Current bits from the element. + typename SparseBitVectorElement<ElementSize>::BitWord Bits; + + // Move our iterator to the first non-zero bit in the bitmap. 
+ void AdvanceToFirstNonZero() { + if (AtEnd) + return; + if (BitVector->Elements.empty()) { + AtEnd = true; + return; + } + Iter = BitVector->Elements.begin(); + BitNumber = Iter->index() * ElementSize; + unsigned BitPos = Iter->find_first(); + BitNumber += BitPos; + WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE; + Bits = Iter->word(WordNumber); + Bits >>= BitPos % BITWORD_SIZE; + } + + // Move our iterator to the next non-zero bit. + void AdvanceToNextNonZero() { + if (AtEnd) + return; + + while (Bits && !(Bits & 1)) { + Bits >>= 1; + BitNumber += 1; + } + + // See if we ran out of Bits in this word. + if (!Bits) { + int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize) ; + // If we ran out of set bits in this element, move to next element. + if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) { + ++Iter; + WordNumber = 0; + + // We may run out of elements in the bitmap. + if (Iter == BitVector->Elements.end()) { + AtEnd = true; + return; + } + // Set up for next non zero word in bitmap. + BitNumber = Iter->index() * ElementSize; + NextSetBitNumber = Iter->find_first(); + BitNumber += NextSetBitNumber; + WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE; + Bits = Iter->word(WordNumber); + Bits >>= NextSetBitNumber % BITWORD_SIZE; + } else { + WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE; + Bits = Iter->word(WordNumber); + Bits >>= NextSetBitNumber % BITWORD_SIZE; + BitNumber = Iter->index() * ElementSize; + BitNumber += NextSetBitNumber; + } + } + } + public: + // Preincrement. + inline SparseBitVectorIterator& operator++() { + ++BitNumber; + Bits >>= 1; + AdvanceToNextNonZero(); + return *this; + } + + // Postincrement. + inline SparseBitVectorIterator operator++(int) { + SparseBitVectorIterator tmp = *this; + ++*this; + return tmp; + } + + // Return the current set bit number. + unsigned operator*() const { + return BitNumber; + } + + bool operator==(const SparseBitVectorIterator &RHS) const { + // If they are both at the end, ignore the rest of the fields. + if (AtEnd && RHS.AtEnd) + return true; + // Otherwise they are the same if they have the same bit number and + // bitmap. + return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber; + } + bool operator!=(const SparseBitVectorIterator &RHS) const { + return !(*this == RHS); + } + SparseBitVectorIterator(): BitVector(NULL) { + } + + + SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS, + bool end = false):BitVector(RHS) { + Iter = BitVector->Elements.begin(); + BitNumber = 0; + Bits = 0; + WordNumber = ~0; + AtEnd = end; + AdvanceToFirstNonZero(); + } + }; +public: + typedef SparseBitVectorIterator iterator; + + SparseBitVector () { + CurrElementIter = Elements.begin (); + } + + ~SparseBitVector() { + } + + // SparseBitVector copy ctor. + SparseBitVector(const SparseBitVector &RHS) { + ElementListConstIter ElementIter = RHS.Elements.begin(); + while (ElementIter != RHS.Elements.end()) { + Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter)); + ++ElementIter; + } + + CurrElementIter = Elements.begin (); + } + + // Clear. 
+ void clear() { + Elements.clear(); + } + + // Assignment + SparseBitVector& operator=(const SparseBitVector& RHS) { + Elements.clear(); + + ElementListConstIter ElementIter = RHS.Elements.begin(); + while (ElementIter != RHS.Elements.end()) { + Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter)); + ++ElementIter; + } + + CurrElementIter = Elements.begin (); + + return *this; + } + + // Test, Reset, and Set a bit in the bitmap. + bool test(unsigned Idx) { + if (Elements.empty()) + return false; + + unsigned ElementIndex = Idx / ElementSize; + ElementListIter ElementIter = FindLowerBound(ElementIndex); + + // If we can't find an element that is supposed to contain this bit, there + // is nothing more to do. + if (ElementIter == Elements.end() || + ElementIter->index() != ElementIndex) + return false; + return ElementIter->test(Idx % ElementSize); + } + + void reset(unsigned Idx) { + if (Elements.empty()) + return; + + unsigned ElementIndex = Idx / ElementSize; + ElementListIter ElementIter = FindLowerBound(ElementIndex); + + // If we can't find an element that is supposed to contain this bit, there + // is nothing more to do. + if (ElementIter == Elements.end() || + ElementIter->index() != ElementIndex) + return; + ElementIter->reset(Idx % ElementSize); + + // When the element is zeroed out, delete it. + if (ElementIter->empty()) { + ++CurrElementIter; + Elements.erase(ElementIter); + } + } + + void set(unsigned Idx) { + unsigned ElementIndex = Idx / ElementSize; + SparseBitVectorElement<ElementSize> *Element; + ElementListIter ElementIter; + if (Elements.empty()) { + Element = new SparseBitVectorElement<ElementSize>(ElementIndex); + ElementIter = Elements.insert(Elements.end(), Element); + + } else { + ElementIter = FindLowerBound(ElementIndex); + + if (ElementIter == Elements.end() || + ElementIter->index() != ElementIndex) { + Element = new SparseBitVectorElement<ElementSize>(ElementIndex); + // We may have hit the beginning of our SparseBitVector, in which case, + // we may need to insert right after this element, which requires moving + // the current iterator forward one, because insert does insert before. + if (ElementIter != Elements.end() && + ElementIter->index() < ElementIndex) + ElementIter = Elements.insert(++ElementIter, Element); + else + ElementIter = Elements.insert(ElementIter, Element); + } + } + CurrElementIter = ElementIter; + + ElementIter->set(Idx % ElementSize); + } + + bool test_and_set (unsigned Idx) { + bool old = test(Idx); + if (!old) { + set(Idx); + return true; + } + return false; + } + + bool operator!=(const SparseBitVector &RHS) const { + return !(*this == RHS); + } + + bool operator==(const SparseBitVector &RHS) const { + ElementListConstIter Iter1 = Elements.begin(); + ElementListConstIter Iter2 = RHS.Elements.begin(); + + for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end(); + ++Iter1, ++Iter2) { + if (*Iter1 != *Iter2) + return false; + } + return Iter1 == Elements.end() && Iter2 == RHS.Elements.end(); + } + + // Union our bitmap with the RHS and return true if we changed. 
+ bool operator|=(const SparseBitVector &RHS) { + bool changed = false; + ElementListIter Iter1 = Elements.begin(); + ElementListConstIter Iter2 = RHS.Elements.begin(); + + // If RHS is empty, we are done + if (RHS.Elements.empty()) + return false; + + while (Iter2 != RHS.Elements.end()) { + if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) { + Elements.insert(Iter1, + new SparseBitVectorElement<ElementSize>(*Iter2)); + ++Iter2; + changed = true; + } else if (Iter1->index() == Iter2->index()) { + changed |= Iter1->unionWith(*Iter2); + ++Iter1; + ++Iter2; + } else { + ++Iter1; + } + } + CurrElementIter = Elements.begin(); + return changed; + } + + // Intersect our bitmap with the RHS and return true if ours changed. + bool operator&=(const SparseBitVector &RHS) { + bool changed = false; + ElementListIter Iter1 = Elements.begin(); + ElementListConstIter Iter2 = RHS.Elements.begin(); + + // Check if both bitmaps are empty. + if (Elements.empty() && RHS.Elements.empty()) + return false; + + // Loop through, intersecting as we go, erasing elements when necessary. + while (Iter2 != RHS.Elements.end()) { + if (Iter1 == Elements.end()) { + CurrElementIter = Elements.begin(); + return changed; + } + + if (Iter1->index() > Iter2->index()) { + ++Iter2; + } else if (Iter1->index() == Iter2->index()) { + bool BecameZero; + changed |= Iter1->intersectWith(*Iter2, BecameZero); + if (BecameZero) { + ElementListIter IterTmp = Iter1; + ++Iter1; + Elements.erase(IterTmp); + } else { + ++Iter1; + } + ++Iter2; + } else { + ElementListIter IterTmp = Iter1; + ++Iter1; + Elements.erase(IterTmp); + } + } + Elements.erase(Iter1, Elements.end()); + CurrElementIter = Elements.begin(); + return changed; + } + + // Intersect our bitmap with the complement of the RHS and return true + // if ours changed. + bool intersectWithComplement(const SparseBitVector &RHS) { + bool changed = false; + ElementListIter Iter1 = Elements.begin(); + ElementListConstIter Iter2 = RHS.Elements.begin(); + + // If either our bitmap or RHS is empty, we are done + if (Elements.empty() || RHS.Elements.empty()) + return false; + + // Loop through, intersecting as we go, erasing elements when necessary. + while (Iter2 != RHS.Elements.end()) { + if (Iter1 == Elements.end()) { + CurrElementIter = Elements.begin(); + return changed; + } + + if (Iter1->index() > Iter2->index()) { + ++Iter2; + } else if (Iter1->index() == Iter2->index()) { + bool BecameZero; + changed |= Iter1->intersectWithComplement(*Iter2, BecameZero); + if (BecameZero) { + ElementListIter IterTmp = Iter1; + ++Iter1; + Elements.erase(IterTmp); + } else { + ++Iter1; + } + ++Iter2; + } else { + ++Iter1; + } + } + CurrElementIter = Elements.begin(); + return changed; + } + + bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) const { + return intersectWithComplement(*RHS); + } + + + // Three argument version of intersectWithComplement. + // Result of RHS1 & ~RHS2 is stored into this bitmap. + void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1, + const SparseBitVector<ElementSize> &RHS2) + { + Elements.clear(); + CurrElementIter = Elements.begin(); + ElementListConstIter Iter1 = RHS1.Elements.begin(); + ElementListConstIter Iter2 = RHS2.Elements.begin(); + + // If RHS1 is empty, we are done + // If RHS2 is empty, we still have to copy RHS1 + if (RHS1.Elements.empty()) + return; + + // Loop through, intersecting as we go, erasing elements when necessary. 
+ while (Iter2 != RHS2.Elements.end()) { + if (Iter1 == RHS1.Elements.end()) + return; + + if (Iter1->index() > Iter2->index()) { + ++Iter2; + } else if (Iter1->index() == Iter2->index()) { + bool BecameZero = false; + SparseBitVectorElement<ElementSize> *NewElement = + new SparseBitVectorElement<ElementSize>(Iter1->index()); + NewElement->intersectWithComplement(*Iter1, *Iter2, BecameZero); + if (!BecameZero) { + Elements.push_back(NewElement); + } + else + delete NewElement; + ++Iter1; + ++Iter2; + } else { + SparseBitVectorElement<ElementSize> *NewElement = + new SparseBitVectorElement<ElementSize>(*Iter1); + Elements.push_back(NewElement); + ++Iter1; + } + } + + // copy the remaining elements + while (Iter1 != RHS1.Elements.end()) { + SparseBitVectorElement<ElementSize> *NewElement = + new SparseBitVectorElement<ElementSize>(*Iter1); + Elements.push_back(NewElement); + ++Iter1; + } + + return; + } + + void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1, + const SparseBitVector<ElementSize> *RHS2) { + intersectWithComplement(*RHS1, *RHS2); + } + + bool intersects(const SparseBitVector<ElementSize> *RHS) const { + return intersects(*RHS); + } + + // Return true if we share any bits in common with RHS + bool intersects(const SparseBitVector<ElementSize> &RHS) const { + ElementListConstIter Iter1 = Elements.begin(); + ElementListConstIter Iter2 = RHS.Elements.begin(); + + // Check if both bitmaps are empty. + if (Elements.empty() && RHS.Elements.empty()) + return false; + + // Loop through, intersecting stopping when we hit bits in common. + while (Iter2 != RHS.Elements.end()) { + if (Iter1 == Elements.end()) + return false; + + if (Iter1->index() > Iter2->index()) { + ++Iter2; + } else if (Iter1->index() == Iter2->index()) { + if (Iter1->intersects(*Iter2)) + return true; + ++Iter1; + ++Iter2; + } else { + ++Iter1; + } + } + return false; + } + + // Return true iff all bits set in this SparseBitVector are + // also set in RHS. + bool contains(const SparseBitVector<ElementSize> &RHS) const { + SparseBitVector<ElementSize> Result(*this); + Result &= RHS; + return (Result == RHS); + } + + // Return the first set bit in the bitmap. Return -1 if no bits are set. + int find_first() const { + if (Elements.empty()) + return -1; + const SparseBitVectorElement<ElementSize> &First = *(Elements.begin()); + return (First.index() * ElementSize) + First.find_first(); + } + + // Return true if the SparseBitVector is empty + bool empty() const { + return Elements.empty(); + } + + unsigned count() const { + unsigned BitCount = 0; + for (ElementListConstIter Iter = Elements.begin(); + Iter != Elements.end(); + ++Iter) + BitCount += Iter->count(); + + return BitCount; + } + iterator begin() const { + return iterator(this); + } + + iterator end() const { + return iterator(this, true); + } + + // Get a hash value for this bitmap. + uint64_t getHashValue() const { + uint64_t HashVal = 0; + for (ElementListConstIter Iter = Elements.begin(); + Iter != Elements.end(); + ++Iter) { + HashVal ^= Iter->index(); + HashVal ^= Iter->getHashValue(); + } + return HashVal; + } +}; + +// Convenience functions to allow Or and And without dereferencing in the user +// code. 
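(Editor's aside: a hedged sketch of how a client might use the SparseBitVector defined above; only operations declared in this header are used, and the indices are illustrative assumptions.)

#include "llvm/ADT/SparseBitVector.h"

unsigned demoSparseBitVector() {
  llvm::SparseBitVector<> A, B;       // default ElementSize of 128 bits per element
  A.set(5);
  A.set(10000);                       // widely separated indices stay cheap to store
  B.set(5);
  B.set(7);
  A |= B;                             // in-place union; A now holds {5, 7, 10000}
  unsigned Sum = 0;
  for (llvm::SparseBitVector<>::iterator I = A.begin(), E = A.end(); I != E; ++I)
    Sum += *I;                        // *I is the index of a set bit
  return A.count() == 3 ? Sum : 0;    // 10012
}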
+ +template <unsigned ElementSize> +inline bool operator |=(SparseBitVector<ElementSize> &LHS, + const SparseBitVector<ElementSize> *RHS) { + return LHS |= *RHS; +} + +template <unsigned ElementSize> +inline bool operator |=(SparseBitVector<ElementSize> *LHS, + const SparseBitVector<ElementSize> &RHS) { + return LHS->operator|=(RHS); +} + +template <unsigned ElementSize> +inline bool operator &=(SparseBitVector<ElementSize> *LHS, + const SparseBitVector<ElementSize> &RHS) { + return LHS->operator&=(RHS); +} + +template <unsigned ElementSize> +inline bool operator &=(SparseBitVector<ElementSize> &LHS, + const SparseBitVector<ElementSize> *RHS) { + return LHS &= *RHS; +} + +// Convenience functions for infix union, intersection, difference operators. + +template <unsigned ElementSize> +inline SparseBitVector<ElementSize> +operator|(const SparseBitVector<ElementSize> &LHS, + const SparseBitVector<ElementSize> &RHS) { + SparseBitVector<ElementSize> Result(LHS); + Result |= RHS; + return Result; +} + +template <unsigned ElementSize> +inline SparseBitVector<ElementSize> +operator&(const SparseBitVector<ElementSize> &LHS, + const SparseBitVector<ElementSize> &RHS) { + SparseBitVector<ElementSize> Result(LHS); + Result &= RHS; + return Result; +} + +template <unsigned ElementSize> +inline SparseBitVector<ElementSize> +operator-(const SparseBitVector<ElementSize> &LHS, + const SparseBitVector<ElementSize> &RHS) { + SparseBitVector<ElementSize> Result; + Result.intersectWithComplement(LHS, RHS); + return Result; +} + + + + +// Dump a SparseBitVector to a stream +template <unsigned ElementSize> +void dump(const SparseBitVector<ElementSize> &LHS, llvm::OStream &out) { + out << "[ "; + + typename SparseBitVector<ElementSize>::iterator bi; + for (bi = LHS.begin(); bi != LHS.end(); ++bi) { + out << *bi << " "; + } + out << " ]\n"; +} +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/Statistic.h b/include/llvm/ADT/Statistic.h new file mode 100644 index 0000000..537f866 --- /dev/null +++ b/include/llvm/ADT/Statistic.h @@ -0,0 +1,75 @@ +//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the 'Statistic' class, which is designed to be an easy way +// to expose various metrics from passes. These statistics are printed at the +// end of a run (from llvm_shutdown), when the -stats command line option is +// passed on the command line. +// +// This is useful for reporting information like the number of instructions +// simplified, optimized or removed by various transformations, like this: +// +// static Statistic NumInstsKilled("gcse", "Number of instructions killed"); +// +// Later, in the code: ++NumInstsKilled; +// +// NOTE: Statistics *must* be declared as global variables. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STATISTIC_H +#define LLVM_ADT_STATISTIC_H + +namespace llvm { + +class Statistic { +public: + const char *Name; + const char *Desc; + unsigned Value : 31; + bool Initialized : 1; + + unsigned getValue() const { return Value; } + const char *getName() const { return Name; } + const char *getDesc() const { return Desc; } + + /// construct - This should only be called for non-global statistics. 
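(Editor's aside: the STATISTIC macro defined near the end of this header is the usual way to declare one of these counters. A hedged sketch follows; DEBUG_TYPE, the variable name, and the description are illustrative assumptions.)

#define DEBUG_TYPE "widget-fold"      // hypothetical pass name, only for this sketch
#include "llvm/ADT/Statistic.h"

STATISTIC(NumWidgetsFolded, "Number of widgets folded");

void noteWidgetFolded() {
  ++NumWidgetsFolded;                 // counted here, printed at shutdown when -stats is given
}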
+ void construct(const char *name, const char *desc) { + Name = name; Desc = desc; + Value = 0; Initialized = 0; + } + + // Allow use of this class as the value itself. + operator unsigned() const { return Value; } + const Statistic &operator=(unsigned Val) { Value = Val; return init(); } + const Statistic &operator++() { ++Value; return init(); } + unsigned operator++(int) { init(); return Value++; } + const Statistic &operator--() { --Value; return init(); } + unsigned operator--(int) { init(); return Value--; } + const Statistic &operator+=(const unsigned &V) { Value += V; return init(); } + const Statistic &operator-=(const unsigned &V) { Value -= V; return init(); } + const Statistic &operator*=(const unsigned &V) { Value *= V; return init(); } + const Statistic &operator/=(const unsigned &V) { Value /= V; return init(); } + +protected: + Statistic &init() { + if (!Initialized) RegisterStatistic(); + return *this; + } + void RegisterStatistic(); +}; + +// STATISTIC - A macro to make definition of statistics really simple. This +// automatically passes the DEBUG_TYPE of the file into the statistic. +#define STATISTIC(VARNAME, DESC) \ + static llvm::Statistic VARNAME = { DEBUG_TYPE, DESC, 0, 0 } + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h new file mode 100644 index 0000000..e40e409 --- /dev/null +++ b/include/llvm/ADT/StringExtras.h @@ -0,0 +1,234 @@ +//===-- llvm/ADT/StringExtras.h - Useful string functions -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains some functions that are useful when dealing with strings. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STRINGEXTRAS_H +#define LLVM_ADT_STRINGEXTRAS_H + +#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/APFloat.h" +#include <cctype> +#include <cstdio> +#include <string> +#include <vector> + +namespace llvm { + +/// hexdigit - Return the (uppercase) hexadecimal character for the +/// given number \arg X (which should be less than 16). +static inline char hexdigit(unsigned X) { + return X < 10 ? '0' + X : 'A' + X - 10; +} + +/// utohex_buffer - Emit the specified number into the buffer specified by +/// BufferEnd, returning a pointer to the start of the string. This can be used +/// like this: (note that the buffer must be large enough to handle any number): +/// char Buffer[40]; +/// printf("0x%s", utohex_buffer(X, Buffer+40)); +/// +/// This should only be used with unsigned types. +/// +template<typename IntTy> +static inline char *utohex_buffer(IntTy X, char *BufferEnd) { + char *BufPtr = BufferEnd; + *--BufPtr = 0; // Null terminate buffer. + if (X == 0) { + *--BufPtr = '0'; // Handle special case. + return BufPtr; + } + + while (X) { + unsigned char Mod = static_cast<unsigned char>(X) & 15; + *--BufPtr = hexdigit(Mod); + X >>= 4; + } + return BufPtr; +} + +static inline std::string utohexstr(uint64_t X) { + char Buffer[40]; + return utohex_buffer(X, Buffer+40); +} + +static inline std::string utostr_32(uint32_t X, bool isNeg = false) { + char Buffer[20]; + char *BufPtr = Buffer+19; + + *BufPtr = 0; // Null terminate buffer... + if (X == 0) *--BufPtr = '0'; // Handle special case... 
+ + while (X) { + *--BufPtr = '0' + char(X % 10); + X /= 10; + } + + if (isNeg) *--BufPtr = '-'; // Add negative sign... + + return std::string(BufPtr); +} + +static inline std::string utostr(uint64_t X, bool isNeg = false) { + if (X == uint32_t(X)) + return utostr_32(uint32_t(X), isNeg); + + char Buffer[40]; + char *BufPtr = Buffer+39; + + *BufPtr = 0; // Null terminate buffer... + if (X == 0) *--BufPtr = '0'; // Handle special case... + + while (X) { + *--BufPtr = '0' + char(X % 10); + X /= 10; + } + + if (isNeg) *--BufPtr = '-'; // Add negative sign... + return std::string(BufPtr); +} + + +static inline std::string itostr(int64_t X) { + if (X < 0) + return utostr(static_cast<uint64_t>(-X), true); + else + return utostr(static_cast<uint64_t>(X)); +} + +static inline std::string itohexstr(int64_t X) { + return utohexstr(static_cast<uint64_t>(X)); +} + +static inline std::string ftostr(double V) { + char Buffer[200]; + sprintf(Buffer, "%20.6e", V); + char *B = Buffer; + while (*B == ' ') ++B; + return B; +} + +static inline std::string ftostr(const APFloat& V) { + if (&V.getSemantics() == &APFloat::IEEEdouble) + return ftostr(V.convertToDouble()); + else if (&V.getSemantics() == &APFloat::IEEEsingle) + return ftostr((double)V.convertToFloat()); + return "<unknown format in ftostr>"; // error +} + +static inline std::string LowercaseString(const std::string &S) { + std::string result(S); + for (unsigned i = 0; i < S.length(); ++i) + if (isupper(result[i])) + result[i] = char(tolower(result[i])); + return result; +} + +static inline std::string UppercaseString(const std::string &S) { + std::string result(S); + for (unsigned i = 0; i < S.length(); ++i) + if (islower(result[i])) + result[i] = char(toupper(result[i])); + return result; +} + +/// StringsEqualNoCase - Return true if the two strings are equal, ignoring +/// case. +static inline bool StringsEqualNoCase(const std::string &LHS, + const std::string &RHS) { + if (LHS.size() != RHS.size()) return false; + for (unsigned i = 0, e = static_cast<unsigned>(LHS.size()); i != e; ++i) + if (tolower(LHS[i]) != tolower(RHS[i])) return false; + return true; +} + +/// StringsEqualNoCase - Return true if the two strings are equal, ignoring +/// case. +static inline bool StringsEqualNoCase(const std::string &LHS, + const char *RHS) { + for (unsigned i = 0, e = static_cast<unsigned>(LHS.size()); i != e; ++i) { + if (RHS[i] == 0) return false; // RHS too short. + if (tolower(LHS[i]) != tolower(RHS[i])) return false; + } + return RHS[LHS.size()] == 0; // Not too long? +} + +/// StringsEqualNoCase - Return true if the two null-terminated C strings are +/// equal, ignoring + +static inline bool StringsEqualNoCase(const char *LHS, const char *RHS, + unsigned len) { + + for (unsigned i = 0; i < len; ++i) { + if (tolower(LHS[i]) != tolower(RHS[i])) + return false; + + // If RHS[i] == 0 then LHS[i] == 0 or otherwise we would have returned + // at the previous branch as tolower('\0') == '\0'. + if (RHS[i] == 0) + return true; + } + + return true; +} + +/// CStrInCStrNoCase - Portable version of strcasestr. Locates the first +/// occurance of c-string 's2' in string 's1', ignoring case. Returns +/// NULL if 's2' cannot be found. +static inline const char* CStrInCStrNoCase(const char *s1, const char *s2) { + + // Are either strings NULL or empty? + if (!s1 || !s2 || s1[0] == '\0' || s2[0] == '\0') + return 0; + + if (s1 == s2) + return s1; + + const char *I1=s1, *I2=s2; + + while (*I1 != '\0' && *I2 != '\0' ) + if (tolower(*I1) != tolower(*I2)) { // No match. 
Start over. + ++s1; I1 = s1; I2 = s2; + } + else { // Character match. Advance to the next character. + ++I1; ++I2; + } + + // If we exhausted all of the characters in 's2', then 's2' appears in 's1'. + return *I2 == '\0' ? s1 : 0; +} + +/// getToken - This function extracts one token from source, ignoring any +/// leading characters that appear in the Delimiters string, and ending the +/// token at any of the characters that appear in the Delimiters string. If +/// there are no tokens in the source string, an empty string is returned. +/// The Source source string is updated in place to remove the returned string +/// and any delimiter prefix from it. +std::string getToken(std::string &Source, + const char *Delimiters = " \t\n\v\f\r"); + +/// SplitString - Split up the specified string according to the specified +/// delimiters, appending the result fragments to the output list. +void SplitString(const std::string &Source, + std::vector<std::string> &OutFragments, + const char *Delimiters = " \t\n\v\f\r"); + +/// UnescapeString - Modify the argument string, turning two character sequences +/// like '\\' 'n' into '\n'. This handles: \e \a \b \f \n \r \t \v \' \\ and +/// \num (where num is a 1-3 byte octal value). +void UnescapeString(std::string &Str); + +/// EscapeString - Modify the argument string, turning '\\' and anything that +/// doesn't satisfy std::isprint into an escape sequence. +void EscapeString(std::string &Str); + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/StringMap.h b/include/llvm/ADT/StringMap.h new file mode 100644 index 0000000..a15d24e --- /dev/null +++ b/include/llvm/ADT/StringMap.h @@ -0,0 +1,504 @@ +//===--- StringMap.h - String Hash table map interface ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the StringMap class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STRINGMAP_H +#define LLVM_ADT_STRINGMAP_H + +#include "llvm/Support/Allocator.h" +#include <cstring> +#include <string> + +namespace llvm { + template<typename ValueT> + class StringMapConstIterator; + template<typename ValueT> + class StringMapIterator; + template<typename ValueTy> + class StringMapEntry; + +/// StringMapEntryInitializer - This datatype can be partially specialized for +/// various datatypes in a stringmap to allow them to be initialized when an +/// entry is default constructed for the map. +template<typename ValueTy> +class StringMapEntryInitializer { +public: + template <typename InitTy> + static void Initialize(StringMapEntry<ValueTy> &T, InitTy InitVal) { + T.second = InitVal; + } +}; + + +/// StringMapEntryBase - Shared base class of StringMapEntry instances. +class StringMapEntryBase { + unsigned StrLen; +public: + explicit StringMapEntryBase(unsigned Len) : StrLen(Len) {} + + unsigned getKeyLength() const { return StrLen; } +}; + +/// StringMapImpl - This is the base class of StringMap that is shared among +/// all of its instantiations. +class StringMapImpl { +public: + /// ItemBucket - The hash table consists of an array of these. If Item is + /// non-null, this is an extant entry, otherwise, it is a hole. + struct ItemBucket { + /// FullHashValue - This remembers the full hash value of the key for + /// easy scanning. 
+ unsigned FullHashValue; + + /// Item - This is a pointer to the actual item object. + StringMapEntryBase *Item; + }; + +protected: + ItemBucket *TheTable; + unsigned NumBuckets; + unsigned NumItems; + unsigned NumTombstones; + unsigned ItemSize; +protected: + explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) { + // Initialize the map with zero buckets to allocation. + TheTable = 0; + NumBuckets = 0; + NumItems = 0; + NumTombstones = 0; + } + StringMapImpl(unsigned InitSize, unsigned ItemSize); + void RehashTable(); + + /// ShouldRehash - Return true if the table should be rehashed after a new + /// element was recently inserted. + bool ShouldRehash() const { + // If the hash table is now more than 3/4 full, or if fewer than 1/8 of + // the buckets are empty (meaning that many are filled with tombstones), + // grow the table. + return NumItems*4 > NumBuckets*3 || + NumBuckets-(NumItems+NumTombstones) < NumBuckets/8; + } + + /// LookupBucketFor - Look up the bucket that the specified string should end + /// up in. If it already exists as a key in the map, the Item pointer for the + /// specified bucket will be non-null. Otherwise, it will be null. In either + /// case, the FullHashValue field of the bucket will be set to the hash value + /// of the string. + unsigned LookupBucketFor(const char *KeyStart, const char *KeyEnd); + + /// FindKey - Look up the bucket that contains the specified key. If it exists + /// in the map, return the bucket number of the key. Otherwise return -1. + /// This does not modify the map. + int FindKey(const char *KeyStart, const char *KeyEnd) const; + + /// RemoveKey - Remove the specified StringMapEntry from the table, but do not + /// delete it. This aborts if the value isn't in the table. + void RemoveKey(StringMapEntryBase *V); + + /// RemoveKey - Remove the StringMapEntry for the specified key from the + /// table, returning it. If the key is not in the table, this returns null. + StringMapEntryBase *RemoveKey(const char *KeyStart, const char *KeyEnd); +private: + void init(unsigned Size); +public: + static StringMapEntryBase *getTombstoneVal() { + return (StringMapEntryBase*)-1; + } + + unsigned getNumBuckets() const { return NumBuckets; } + unsigned getNumItems() const { return NumItems; } + + bool empty() const { return NumItems == 0; } + unsigned size() const { return NumItems; } +}; + +/// StringMapEntry - This is used to represent one value that is inserted into +/// a StringMap. It contains the Value itself and the key: the string length +/// and data. +template<typename ValueTy> +class StringMapEntry : public StringMapEntryBase { +public: + ValueTy second; + + explicit StringMapEntry(unsigned strLen) + : StringMapEntryBase(strLen), second() {} + StringMapEntry(unsigned strLen, const ValueTy &V) + : StringMapEntryBase(strLen), second(V) {} + + const ValueTy &getValue() const { return second; } + ValueTy &getValue() { return second; } + + void setValue(const ValueTy &V) { second = V; } + + /// getKeyData - Return the start of the string data that is the key for this + /// value. The string data is always stored immediately after the + /// StringMapEntry object. + const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);} + + const char *first() const { return getKeyData(); } + + /// Create - Create a StringMapEntry for the specified key and default + /// construct the value. 
+ template<typename AllocatorTy, typename InitType> + static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd, + AllocatorTy &Allocator, + InitType InitVal) { + unsigned KeyLength = static_cast<unsigned>(KeyEnd-KeyStart); + + // Okay, the item doesn't already exist, and 'Bucket' is the bucket to fill + // in. Allocate a new item with space for the string at the end and a null + // terminator. + + unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+ + KeyLength+1; + unsigned Alignment = alignof<StringMapEntry>(); + + StringMapEntry *NewItem = + static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment)); + + // Default construct the value. + new (NewItem) StringMapEntry(KeyLength); + + // Copy the string information. + char *StrBuffer = const_cast<char*>(NewItem->getKeyData()); + memcpy(StrBuffer, KeyStart, KeyLength); + StrBuffer[KeyLength] = 0; // Null terminate for convenience of clients. + + // Initialize the value if the client wants to. + StringMapEntryInitializer<ValueTy>::Initialize(*NewItem, InitVal); + return NewItem; + } + + template<typename AllocatorTy> + static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd, + AllocatorTy &Allocator) { + return Create(KeyStart, KeyEnd, Allocator, 0); + } + + + /// Create - Create a StringMapEntry with normal malloc/free. + template<typename InitType> + static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd, + InitType InitVal) { + MallocAllocator A; + return Create(KeyStart, KeyEnd, A, InitVal); + } + + static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd) { + return Create(KeyStart, KeyEnd, ValueTy()); + } + + /// GetStringMapEntryFromValue - Given a value that is known to be embedded + /// into a StringMapEntry, return the StringMapEntry itself. + static StringMapEntry &GetStringMapEntryFromValue(ValueTy &V) { + StringMapEntry *EPtr = 0; + char *Ptr = reinterpret_cast<char*>(&V) - + (reinterpret_cast<char*>(&EPtr->second) - + reinterpret_cast<char*>(EPtr)); + return *reinterpret_cast<StringMapEntry*>(Ptr); + } + static const StringMapEntry &GetStringMapEntryFromValue(const ValueTy &V) { + return GetStringMapEntryFromValue(const_cast<ValueTy&>(V)); + } + + /// Destroy - Destroy this StringMapEntry, releasing memory back to the + /// specified allocator. + template<typename AllocatorTy> + void Destroy(AllocatorTy &Allocator) { + // Free memory referenced by the item. + this->~StringMapEntry(); + Allocator.Deallocate(this); + } + + /// Destroy this object, releasing memory back to the malloc allocator. + void Destroy() { + MallocAllocator A; + Destroy(A); + } +}; + + +/// StringMap - This is an unconventional map that is specialized for handling +/// keys that are "strings", which are basically ranges of bytes. This does some +/// funky memory allocation and hashing things to make it extremely efficient, +/// storing the string data *after* the value in the map. 
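(Editor's aside: a hedged usage sketch for the StringMap template defined below, using only members declared in this header; the key strings and value type are illustrative assumptions.)

#include "llvm/ADT/StringMap.h"

unsigned demoStringMap(const char *A, const char *B) {
  llvm::StringMap<unsigned> Counts;   // each value is allocated in front of its key's character data
  ++Counts[A];                        // operator[] default-constructs the entry on first use
  ++Counts[B];
  ++Counts[A];
  return Counts.lookup(A);            // 2 when A and B are different strings; lookup() never inserts
}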
+template<typename ValueTy, typename AllocatorTy = MallocAllocator> +class StringMap : public StringMapImpl { + AllocatorTy Allocator; + typedef StringMapEntry<ValueTy> MapEntryTy; +public: + StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {} + explicit StringMap(unsigned InitialSize) + : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {} + explicit StringMap(const StringMap &RHS) + : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) { + assert(RHS.empty() && + "Copy ctor from non-empty stringmap not implemented yet!"); + } + void operator=(const StringMap &RHS) { + assert(RHS.empty() && + "assignment from non-empty stringmap not implemented yet!"); + clear(); + } + + + AllocatorTy &getAllocator() { return Allocator; } + const AllocatorTy &getAllocator() const { return Allocator; } + + typedef const char* key_type; + typedef ValueTy mapped_type; + typedef StringMapEntry<ValueTy> value_type; + typedef size_t size_type; + + typedef StringMapConstIterator<ValueTy> const_iterator; + typedef StringMapIterator<ValueTy> iterator; + + iterator begin() { + return iterator(TheTable, NumBuckets == 0); + } + iterator end() { + return iterator(TheTable+NumBuckets, true); + } + const_iterator begin() const { + return const_iterator(TheTable, NumBuckets == 0); + } + const_iterator end() const { + return const_iterator(TheTable+NumBuckets, true); + } + + iterator find(const char *KeyStart, const char *KeyEnd) { + int Bucket = FindKey(KeyStart, KeyEnd); + if (Bucket == -1) return end(); + return iterator(TheTable+Bucket); + } + iterator find(const char *Key) { + return find(Key, Key + strlen(Key)); + } + iterator find(const std::string &Key) { + return find(Key.data(), Key.data() + Key.size()); + } + + const_iterator find(const char *KeyStart, const char *KeyEnd) const { + int Bucket = FindKey(KeyStart, KeyEnd); + if (Bucket == -1) return end(); + return const_iterator(TheTable+Bucket); + } + const_iterator find(const char *Key) const { + return find(Key, Key + strlen(Key)); + } + const_iterator find(const std::string &Key) const { + return find(Key.data(), Key.data() + Key.size()); + } + + /// lookup - Return the entry for the specified key, or a default + /// constructed value if no such entry exists. + ValueTy lookup(const char *KeyStart, const char *KeyEnd) const { + const_iterator it = find(KeyStart, KeyEnd); + if (it != end()) + return it->second; + return ValueTy(); + } + ValueTy lookup(const char *Key) const { + const_iterator it = find(Key); + if (it != end()) + return it->second; + return ValueTy(); + } + ValueTy lookup(const std::string &Key) const { + const_iterator it = find(Key); + if (it != end()) + return it->second; + return ValueTy(); + } + + ValueTy& operator[](const char *Key) { + return GetOrCreateValue(Key, Key + strlen(Key)).getValue(); + } + ValueTy& operator[](const std::string &Key) { + return GetOrCreateValue(Key.data(), Key.data() + Key.size()).getValue(); + } + + size_type count(const char *KeyStart, const char *KeyEnd) const { + return find(KeyStart, KeyEnd) == end() ? 0 : 1; + } + size_type count(const char *Key) const { + return count(Key, Key + strlen(Key)); + } + size_type count(const std::string &Key) const { + return count(Key.data(), Key.data() + Key.size()); + } + + /// insert - Insert the specified key/value pair into the map. If the key + /// already exists in the map, return false and ignore the request, otherwise + /// insert it and return true. 
+ bool insert(MapEntryTy *KeyValue) { + unsigned BucketNo = + LookupBucketFor(KeyValue->getKeyData(), + KeyValue->getKeyData()+KeyValue->getKeyLength()); + ItemBucket &Bucket = TheTable[BucketNo]; + if (Bucket.Item && Bucket.Item != getTombstoneVal()) + return false; // Already exists in map. + + if (Bucket.Item == getTombstoneVal()) + --NumTombstones; + Bucket.Item = KeyValue; + ++NumItems; + + if (ShouldRehash()) + RehashTable(); + return true; + } + + // clear - Empties out the StringMap + void clear() { + if (empty()) return; + + // Zap all values, resetting the keys back to non-present (not tombstone), + // which is safe because we're removing all elements. + for (ItemBucket *I = TheTable, *E = TheTable+NumBuckets; I != E; ++I) { + if (I->Item && I->Item != getTombstoneVal()) { + static_cast<MapEntryTy*>(I->Item)->Destroy(Allocator); + I->Item = 0; + } + } + + NumItems = 0; + } + + /// GetOrCreateValue - Look up the specified key in the table. If a value + /// exists, return it. Otherwise, default construct a value, insert it, and + /// return. + template <typename InitTy> + StringMapEntry<ValueTy> &GetOrCreateValue(const char *KeyStart, + const char *KeyEnd, + InitTy Val) { + unsigned BucketNo = LookupBucketFor(KeyStart, KeyEnd); + ItemBucket &Bucket = TheTable[BucketNo]; + if (Bucket.Item && Bucket.Item != getTombstoneVal()) + return *static_cast<MapEntryTy*>(Bucket.Item); + + MapEntryTy *NewItem = MapEntryTy::Create(KeyStart, KeyEnd, Allocator, Val); + + if (Bucket.Item == getTombstoneVal()) + --NumTombstones; + ++NumItems; + + // Fill in the bucket for the hash table. The FullHashValue was already + // filled in by LookupBucketFor. + Bucket.Item = NewItem; + + if (ShouldRehash()) + RehashTable(); + return *NewItem; + } + + StringMapEntry<ValueTy> &GetOrCreateValue(const char *KeyStart, + const char *KeyEnd) { + return GetOrCreateValue(KeyStart, KeyEnd, ValueTy()); + } + + /// remove - Remove the specified key/value pair from the map, but do not + /// erase it. This aborts if the key is not in the map. 
+ void remove(MapEntryTy *KeyValue) { + RemoveKey(KeyValue); + } + + void erase(iterator I) { + MapEntryTy &V = *I; + remove(&V); + V.Destroy(Allocator); + } + + bool erase(const char *Key) { + iterator I = find(Key); + if (I == end()) return false; + erase(I); + return true; + } + + bool erase(const std::string &Key) { + iterator I = find(Key); + if (I == end()) return false; + erase(I); + return true; + } + + ~StringMap() { + clear(); + free(TheTable); + } +}; + + +template<typename ValueTy> +class StringMapConstIterator { +protected: + StringMapImpl::ItemBucket *Ptr; +public: + typedef StringMapEntry<ValueTy> value_type; + + explicit StringMapConstIterator(StringMapImpl::ItemBucket *Bucket, + bool NoAdvance = false) + : Ptr(Bucket) { + if (!NoAdvance) AdvancePastEmptyBuckets(); + } + + const value_type &operator*() const { + return *static_cast<StringMapEntry<ValueTy>*>(Ptr->Item); + } + const value_type *operator->() const { + return static_cast<StringMapEntry<ValueTy>*>(Ptr->Item); + } + + bool operator==(const StringMapConstIterator &RHS) const { + return Ptr == RHS.Ptr; + } + bool operator!=(const StringMapConstIterator &RHS) const { + return Ptr != RHS.Ptr; + } + + inline StringMapConstIterator& operator++() { // Preincrement + ++Ptr; + AdvancePastEmptyBuckets(); + return *this; + } + StringMapConstIterator operator++(int) { // Postincrement + StringMapConstIterator tmp = *this; ++*this; return tmp; + } + +private: + void AdvancePastEmptyBuckets() { + while (Ptr->Item == 0 || Ptr->Item == StringMapImpl::getTombstoneVal()) + ++Ptr; + } +}; + +template<typename ValueTy> +class StringMapIterator : public StringMapConstIterator<ValueTy> { +public: + explicit StringMapIterator(StringMapImpl::ItemBucket *Bucket, + bool NoAdvance = false) + : StringMapConstIterator<ValueTy>(Bucket, NoAdvance) { + } + StringMapEntry<ValueTy> &operator*() const { + return *static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item); + } + StringMapEntry<ValueTy> *operator->() const { + return static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item); + } +}; + +} + +#endif diff --git a/include/llvm/ADT/StringSet.h b/include/llvm/ADT/StringSet.h new file mode 100644 index 0000000..0004836 --- /dev/null +++ b/include/llvm/ADT/StringSet.h @@ -0,0 +1,39 @@ +//===--- StringSet.h - The LLVM Compiler Driver -----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open +// Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// StringSet - A set-like wrapper for the StringMap. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_STRINGSET_H +#define LLVM_ADT_STRINGSET_H + +#include "llvm/ADT/StringMap.h" +#include <cassert> + +namespace llvm { + + /// StringSet - A wrapper for StringMap that provides set-like + /// functionality. Only insert() and count() methods are used by my + /// code. 
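+  ///
+  /// A minimal usage sketch (illustrative only):
+  ///
+  ///   llvm::StringSet<> Languages;
+  ///   Languages.insert("c++");            // true  - newly inserted
+  ///   Languages.insert("c++");            // false - already present
+  ///   if (Languages.count("c++")) { /* present */ }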
+ template <class AllocatorTy = llvm::MallocAllocator> + class StringSet : public llvm::StringMap<char, AllocatorTy> { + typedef llvm::StringMap<char, AllocatorTy> base; + public: + bool insert(const std::string& InLang) { + assert(!InLang.empty()); + const char* KeyStart = &InLang[0]; + const char* KeyEnd = KeyStart + InLang.size(); + return base::insert(llvm::StringMapEntry<char>:: + Create(KeyStart, KeyEnd, base::getAllocator(), '+')); + } + }; +} + +#endif // LLVM_ADT_STRINGSET_H diff --git a/include/llvm/ADT/Tree.h b/include/llvm/ADT/Tree.h new file mode 100644 index 0000000..78f5b4f --- /dev/null +++ b/include/llvm/ADT/Tree.h @@ -0,0 +1,62 @@ +//===- llvm/ADT/Tree.h - Generic n-way tree structure -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class defines a generic N-way tree node structure. The tree structure +// is immutable after creation, but the payload contained within it is not. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_TREE_H +#define LLVM_ADT_TREE_H + +#include <vector> + +namespace llvm { + +template<class ConcreteTreeNode, class Payload> +class Tree { + std::vector<ConcreteTreeNode*> Children; // This node's children, if any. + ConcreteTreeNode *Parent; // Parent of this node. + Payload Data; // Data held in this node. + +protected: + void setChildren(const std::vector<ConcreteTreeNode*> &children) { + Children = children; + } +public: + inline Tree(ConcreteTreeNode *parent) : Parent(parent) {} + inline Tree(const std::vector<ConcreteTreeNode*> &children, + ConcreteTreeNode *par) : Children(children), Parent(par) {} + + inline Tree(const std::vector<ConcreteTreeNode*> &children, + ConcreteTreeNode *par, const Payload &data) + : Children(children), Parent(par), Data(data) {} + + // Tree dtor - Free all children + inline ~Tree() { + for (unsigned i = Children.size(); i > 0; --i) + delete Children[i-1]; + } + + // Tree manipulation/walking routines... + inline ConcreteTreeNode *getParent() const { return Parent; } + inline unsigned getNumChildren() const { return Children.size(); } + inline ConcreteTreeNode *getChild(unsigned i) const { + assert(i < Children.size() && "Tree::getChild with index out of range!"); + return Children[i]; + } + + // Payload access... + inline Payload &getTreeData() { return Data; } + inline const Payload &getTreeData() const { return Data; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/Trie.h b/include/llvm/ADT/Trie.h new file mode 100644 index 0000000..70f3b41 --- /dev/null +++ b/include/llvm/ADT/Trie.h @@ -0,0 +1,335 @@ +//===- llvm/ADT/Trie.h ---- Generic trie structure --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class defines a generic trie structure. The trie structure +// is immutable after creation, but the payload contained within it is not. 
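+//
+// A minimal usage sketch (illustrative only; note that adding a string that is
+// a prefix of an existing entry is not supported yet, see the FIXME in
+// addString below):
+//
+//   Trie<unsigned> T(0);            // 0 acts as the "empty" payload
+//   T.addString("foo", 1);
+//   T.addString("bar", 2);
+//   unsigned V = T.lookup("foo");   // 1
+//   unsigned W = T.lookup("baz");   // 0 - not found, the empty payload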
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_TRIE_H +#define LLVM_ADT_TRIE_H + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/Support/DOTGraphTraits.h" + +#include <vector> + +namespace llvm { + +// FIXME: +// - Labels are usually small, maybe it's better to use SmallString +// - Should we use char* during construction? +// - Should we templatize Empty with traits-like interface? + +template<class Payload> +class Trie { + friend class GraphTraits<Trie<Payload> >; + friend class DOTGraphTraits<Trie<Payload> >; +public: + class Node { + friend class Trie; + + public: + typedef std::vector<Node*> NodeVectorType; + typedef typename NodeVectorType::iterator iterator; + typedef typename NodeVectorType::const_iterator const_iterator; + + private: + enum QueryResult { + Same = -3, + StringIsPrefix = -2, + LabelIsPrefix = -1, + DontMatch = 0, + HaveCommonPart + }; + + struct NodeCmp { + bool operator() (Node* N1, Node* N2) { + return (N1->Label[0] < N2->Label[0]); + } + bool operator() (Node* N, char Id) { + return (N->Label[0] < Id); + } + }; + + std::string Label; + Payload Data; + NodeVectorType Children; + + // Do not implement + Node(const Node&); + Node& operator=(const Node&); + + inline void addEdge(Node* N) { + if (Children.empty()) + Children.push_back(N); + else { + iterator I = std::lower_bound(Children.begin(), Children.end(), + N, NodeCmp()); + // FIXME: no dups are allowed + Children.insert(I, N); + } + } + + inline void setEdge(Node* N) { + char Id = N->Label[0]; + iterator I = std::lower_bound(Children.begin(), Children.end(), + Id, NodeCmp()); + assert(I != Children.end() && "Node does not exists!"); + *I = N; + } + + QueryResult query(const std::string& s) const { + unsigned i, l; + unsigned l1 = s.length(); + unsigned l2 = Label.length(); + + // Find the length of common part + l = std::min(l1, l2); + i = 0; + while ((i < l) && (s[i] == Label[i])) + ++i; + + if (i == l) { // One is prefix of another, find who is who + if (l1 == l2) + return Same; + else if (i == l1) + return StringIsPrefix; + else + return LabelIsPrefix; + } else // s and Label have common (possible empty) part, return its length + return (QueryResult)i; + } + + public: + inline explicit Node(const Payload& data, const std::string& label = ""): + Label(label), Data(data) { } + + inline const Payload& data() const { return Data; } + inline void setData(const Payload& data) { Data = data; } + + inline const std::string& label() const { return Label; } + +#if 0 + inline void dump() { + std::cerr << "Node: " << this << "\n" + << "Label: " << Label << "\n" + << "Children:\n"; + + for (iterator I = Children.begin(), E = Children.end(); I != E; ++I) + std::cerr << (*I)->Label << "\n"; + } +#endif + + inline Node* getEdge(char Id) { + Node* fNode = NULL; + iterator I = std::lower_bound(Children.begin(), Children.end(), + Id, NodeCmp()); + if (I != Children.end() && (*I)->Label[0] == Id) + fNode = *I; + + return fNode; + } + + inline iterator begin() { return Children.begin(); } + inline const_iterator begin() const { return Children.begin(); } + inline iterator end () { return Children.end(); } + inline const_iterator end () const { return Children.end(); } + + inline size_t size () const { return Children.size(); } + inline bool empty() const { return Children.empty(); } + inline const Node* &front() const { return Children.front(); } + inline Node* &front() { return Children.front(); } + inline const Node* &back() const { return Children.back(); } + 
inline Node* &back() { return Children.back(); } + + }; + +private: + std::vector<Node*> Nodes; + Payload Empty; + + inline Node* addNode(const Payload& data, const std::string label = "") { + Node* N = new Node(data, label); + Nodes.push_back(N); + return N; + } + + inline Node* splitEdge(Node* N, char Id, size_t index) { + Node* eNode = N->getEdge(Id); + assert(eNode && "Node doesn't exist"); + + const std::string &l = eNode->Label; + assert(index > 0 && index < l.length() && "Trying to split too far!"); + std::string l1 = l.substr(0, index); + std::string l2 = l.substr(index); + + Node* nNode = addNode(Empty, l1); + N->setEdge(nNode); + + eNode->Label = l2; + nNode->addEdge(eNode); + + return nNode; + } + + // Do not implement + Trie(const Trie&); + Trie& operator=(const Trie&); + +public: + inline explicit Trie(const Payload& empty):Empty(empty) { + addNode(Empty); + } + inline ~Trie() { + for (unsigned i = 0, e = Nodes.size(); i != e; ++i) + delete Nodes[i]; + } + + inline Node* getRoot() const { return Nodes[0]; } + + bool addString(const std::string& s, const Payload& data); + const Payload& lookup(const std::string& s) const; + +}; + +// Define this out-of-line to dissuade the C++ compiler from inlining it. +template<class Payload> +bool Trie<Payload>::addString(const std::string& s, const Payload& data) { + Node* cNode = getRoot(); + Node* tNode = NULL; + std::string s1(s); + + while (tNode == NULL) { + char Id = s1[0]; + if (Node* nNode = cNode->getEdge(Id)) { + typename Node::QueryResult r = nNode->query(s1); + + switch (r) { + case Node::Same: + case Node::StringIsPrefix: + // Currently we don't allow to have two strings in the trie one + // being a prefix of another. This should be fixed. + assert(0 && "FIXME!"); + return false; + case Node::DontMatch: + assert(0 && "Impossible!"); + return false; + case Node::LabelIsPrefix: + s1 = s1.substr(nNode->label().length()); + cNode = nNode; + break; + default: + nNode = splitEdge(cNode, Id, r); + tNode = addNode(data, s1.substr(r)); + nNode->addEdge(tNode); + } + } else { + tNode = addNode(data, s1); + cNode->addEdge(tNode); + } + } + + return true; +} + +template<class Payload> +const Payload& Trie<Payload>::lookup(const std::string& s) const { + Node* cNode = getRoot(); + Node* tNode = NULL; + std::string s1(s); + + while (tNode == NULL) { + char Id = s1[0]; + if (Node* nNode = cNode->getEdge(Id)) { + typename Node::QueryResult r = nNode->query(s1); + + switch (r) { + case Node::Same: + tNode = nNode; + break; + case Node::StringIsPrefix: + return Empty; + case Node::DontMatch: + assert(0 && "Impossible!"); + return Empty; + case Node::LabelIsPrefix: + s1 = s1.substr(nNode->label().length()); + cNode = nNode; + break; + default: + return Empty; + } + } else + return Empty; + } + + return tNode->data(); +} + +template<class Payload> +struct GraphTraits<Trie<Payload> > { + typedef Trie<Payload> TrieType; + typedef typename TrieType::Node NodeType; + typedef typename NodeType::iterator ChildIteratorType; + + static inline NodeType *getEntryNode(const TrieType& T) { + return T.getRoot(); + } + + static inline ChildIteratorType child_begin(NodeType *N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { return N->end(); } + + typedef typename std::vector<NodeType*>::const_iterator nodes_iterator; + + static inline nodes_iterator nodes_begin(const TrieType& G) { + return G.Nodes.begin(); + } + static inline nodes_iterator nodes_end(const TrieType& G) { + return G.Nodes.end(); + } + +}; + +template<class 
Payload> +struct DOTGraphTraits<Trie<Payload> > : public DefaultDOTGraphTraits { + typedef typename Trie<Payload>::Node NodeType; + typedef typename GraphTraits<Trie<Payload> >::ChildIteratorType EdgeIter; + + static std::string getGraphName(const Trie<Payload>& T) { + return "Trie"; + } + + static std::string getNodeLabel(NodeType* Node, const Trie<Payload>& T) { + if (T.getRoot() == Node) + return "<Root>"; + else + return Node->label(); + } + + static std::string getEdgeSourceLabel(NodeType* Node, EdgeIter I) { + NodeType* N = *I; + return N->label().substr(0, 1); + } + + static std::string getNodeAttributes(const NodeType* Node, + const Trie<Payload>& T) { + if (Node->data() != T.Empty) + return "color=blue"; + + return ""; + } + +}; + +} // end of llvm namespace + +#endif // LLVM_ADT_TRIE_H diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h new file mode 100644 index 0000000..b260f98 --- /dev/null +++ b/include/llvm/ADT/Triple.h @@ -0,0 +1,204 @@ +//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_TRIPLE_H +#define LLVM_ADT_TRIPLE_H + +#include <string> + +namespace llvm { + +/// Triple - Helper class for working with target triples. +/// +/// Target triples are strings in the format of: +/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM +/// or +/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT +/// +/// This class is used for clients which want to support arbitrary +/// target triples, but also want to implement certain special +/// behavior for particular targets. This class isolates the mapping +/// from the components of the target triple to well known IDs. +/// +/// See autoconf/config.guess for a glimpse into what they look like +/// in practice. +class Triple { +public: + enum ArchType { + UnknownArch, + + x86, // i?86 + ppc, // powerpc + ppc64, // powerpc64 + x86_64, // amd64, x86_64 + + InvalidArch + }; + enum VendorType { + UnknownVendor, + + Apple, + PC + }; + enum OSType { + UnknownOS, + + Darwin, + DragonFly, + FreeBSD, + Linux + }; + +private: + std::string Data; + + /// The parsed arch type (or InvalidArch if uninitialized). + mutable ArchType Arch; + + /// The parsed vendor type. + mutable VendorType Vendor; + + /// The parsed OS type. + mutable OSType OS; + + bool isInitialized() const { return Arch != InvalidArch; } + void Parse() const; + +public: + /// @name Constructors + /// @{ + + Triple() : Data(""), Arch(InvalidArch) {} + explicit Triple(const char *Str) : Data(Str), Arch(InvalidArch) {} + explicit Triple(const char *ArchStr, const char *VendorStr, const char *OSStr) + : Data(ArchStr), Arch(InvalidArch) { + Data += '-'; + Data += VendorStr; + Data += '-'; + Data += OSStr; + } + + /// @} + /// @name Typed Component Access + /// @{ + + /// getArch - Get the parsed architecture type of this triple. + ArchType getArch() const { + if (!isInitialized()) Parse(); + return Arch; + } + + /// getVendor - Get the parsed vendor type of this triple. + VendorType getVendor() const { + if (!isInitialized()) Parse(); + return Vendor; + } + + /// getOS - Get the parsed operating system type of this triple. 
+ OSType getOS() const { + if (!isInitialized()) Parse(); + return OS; + } + + /// hasEnvironment - Does this triple have the optional environment + /// (fourth) component? + bool hasEnvironment() const { + return getEnvironmentName() != ""; + } + + /// @} + /// @name Direct Component Access + /// @{ + + const std::string &getTriple() const { return Data; } + + // FIXME: Invent a lightweight string representation for these to + // use. + + /// getArchName - Get the architecture (first) component of the + /// triple. + std::string getArchName() const; + + /// getVendorName - Get the vendor (second) component of the triple. + std::string getVendorName() const; + + /// getOSName - Get the operating system (third) component of the + /// triple. + std::string getOSName() const; + + /// getEnvironmentName - Get the optional environment (fourth) + /// component of the triple, or "" if empty. + std::string getEnvironmentName() const; + + /// getOSAndEnvironmentName - Get the operating system and optional + /// environment components as a single string (separated by a '-' + /// if the environment component is present). + std::string getOSAndEnvironmentName() const; + + /// @} + /// @name Mutators + /// @{ + + /// setArch - Set the architecture (first) component of the triple + /// to a known type. + void setArch(ArchType Kind); + + /// setVendor - Set the vendor (second) component of the triple to a + /// known type. + void setVendor(VendorType Kind); + + /// setOS - Set the operating system (third) component of the triple + /// to a known type. + void setOS(OSType Kind); + + /// setTriple - Set all components to the new triple \arg Str. + void setTriple(const std::string &Str); + + /// setArchName - Set the architecture (first) component of the + /// triple by name. + void setArchName(const std::string &Str); + + /// setVendorName - Set the vendor (second) component of the triple + /// by name. + void setVendorName(const std::string &Str); + + /// setOSName - Set the operating system (third) component of the + /// triple by name. + void setOSName(const std::string &Str); + + /// setEnvironmentName - Set the optional environment (fourth) + /// component of the triple by name. + void setEnvironmentName(const std::string &Str); + + /// setOSAndEnvironmentName - Set the operating system and optional + /// environment components with a single string. + void setOSAndEnvironmentName(const std::string &Str); + + /// @} + /// @name Static helpers for IDs. + /// @{ + + /// getArchTypeName - Get the canonical name for the \arg Kind + /// architecture. + static const char *getArchTypeName(ArchType Kind); + + /// getVendorTypeName - Get the canonical name for the \arg Kind + /// vendor. + static const char *getVendorTypeName(VendorType Kind); + + /// getOSTypeName - Get the canonical name for the \arg Kind vendor. + static const char *getOSTypeName(OSType Kind); + + /// @} +}; + +} // End llvm namespace + + +#endif diff --git a/include/llvm/ADT/UniqueVector.h b/include/llvm/ADT/UniqueVector.h new file mode 100644 index 0000000..2d02d1c --- /dev/null +++ b/include/llvm/ADT/UniqueVector.h @@ -0,0 +1,89 @@ +//===-- llvm/ADT/UniqueVector.h ---------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_UNIQUEVECTOR_H +#define LLVM_ADT_UNIQUEVECTOR_H + +#include <cassert> +#include <map> +#include <vector> + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// UniqueVector - This class produces a sequential ID number (base 1) for each +/// unique entry that is added. T is the type of entries in the vector. This +/// class should have an implementation of operator== and of operator<. +/// Entries can be fetched using operator[] with the entry ID. +template<class T> class UniqueVector { +private: + // Map - Used to handle the correspondence of entry to ID. + std::map<T, unsigned> Map; + + // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1. + // + std::vector<T> Vector; + +public: + /// insert - Append entry to the vector if it doesn't already exist. Returns + /// the entry's index + 1 to be used as a unique ID. + unsigned insert(const T &Entry) { + // Check if the entry is already in the map. + unsigned &Val = Map[Entry]; + + // See if entry exists, if so return prior ID. + if (Val) return Val; + + // Compute ID for entry. + Val = static_cast<unsigned>(Vector.size()) + 1; + + // Insert in vector. + Vector.push_back(Entry); + return Val; + } + + /// idFor - return the ID for an existing entry. Returns 0 if the entry is + /// not found. + unsigned idFor(const T &Entry) const { + // Search for entry in the map. + typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry); + + // See if entry exists, if so return ID. + if (MI != Map.end()) return MI->second; + + // No luck. + return 0; + } + + /// operator[] - Returns a reference to the entry with the specified ID. + /// + const T &operator[](unsigned ID) const { + assert(ID-1 < size() && "ID is 0 or out of range!"); + return Vector[ID - 1]; + } + + /// size - Returns the number of entries in the vector. + /// + size_t size() const { return Vector.size(); } + + /// empty - Returns true if the vector is empty. + /// + bool empty() const { return Vector.empty(); } + + /// reset - Clears all the entries. + /// + void reset() { + Map.clear(); + Vector.resize(0, 0); + } +}; + +} // End of namespace llvm + +#endif // LLVM_ADT_UNIQUEVECTOR_H diff --git a/include/llvm/ADT/VectorExtras.h b/include/llvm/ADT/VectorExtras.h new file mode 100644 index 0000000..e05f585 --- /dev/null +++ b/include/llvm/ADT/VectorExtras.h @@ -0,0 +1,41 @@ +//===-- llvm/ADT/VectorExtras.h - Helpers for std::vector -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains helper functions which are useful for working with the +// std::vector class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_VECTOREXTRAS_H +#define LLVM_ADT_VECTOREXTRAS_H + +#include <cstdarg> +#include <vector> + +namespace llvm { + +/// make_vector - Helper function which is useful for building temporary vectors +/// to pass into type construction of CallInst ctors. This turns a null +/// terminated list of pointers (or other value types) into a real live vector. +/// +template<typename T> +inline std::vector<T> make_vector(T A, ...) 
{ + va_list Args; + va_start(Args, A); + std::vector<T> Result; + Result.push_back(A); + while (T Val = va_arg(Args, T)) + Result.push_back(Val); + va_end(Args); + return Result; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h new file mode 100644 index 0000000..9eb7005 --- /dev/null +++ b/include/llvm/ADT/ilist.h @@ -0,0 +1,709 @@ +//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines classes to implement an intrusive doubly linked list class +// (i.e. each node of the list must contain a next and previous field for the +// list. +// +// The ilist_traits trait class is used to gain access to the next and previous +// fields of the node type that the list is instantiated with. If it is not +// specialized, the list defaults to using the getPrev(), getNext() method calls +// to get the next and previous pointers. +// +// The ilist class itself, should be a plug in replacement for list, assuming +// that the nodes contain next/prev pointers. This list replacement does not +// provide a constant time size() method, so be careful to use empty() when you +// really want to know if it's empty. +// +// The ilist class is implemented by allocating a 'tail' node when the list is +// created (using ilist_traits<>::createSentinel()). This tail node is +// absolutely required because the user must be able to compute end()-1. Because +// of this, users of the direct next/prev links will see an extra link on the +// end of the list, which should be ignored. +// +// Requirements for a user of this list: +// +// 1. The user must provide {g|s}et{Next|Prev} methods, or specialize +// ilist_traits to provide an alternate way of getting and setting next and +// prev links. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_ILIST_H +#define LLVM_ADT_ILIST_H + +#include "llvm/ADT/iterator.h" +#include <cassert> + +namespace llvm { + +template<typename NodeTy, typename Traits> class iplist; +template<typename NodeTy> class ilist_iterator; + +/// ilist_nextprev_traits - A fragment for template traits for intrusive list +/// that provides default next/prev implementations for common operations. +/// +template<typename NodeTy> +struct ilist_nextprev_traits { + static NodeTy *getPrev(NodeTy *N) { return N->getPrev(); } + static NodeTy *getNext(NodeTy *N) { return N->getNext(); } + static const NodeTy *getPrev(const NodeTy *N) { return N->getPrev(); } + static const NodeTy *getNext(const NodeTy *N) { return N->getNext(); } + + static void setPrev(NodeTy *N, NodeTy *Prev) { N->setPrev(Prev); } + static void setNext(NodeTy *N, NodeTy *Next) { N->setNext(Next); } +}; + +template<typename NodeTy> +struct ilist_traits; + +/// ilist_sentinel_traits - A fragment for template traits for intrusive list +/// that provides default sentinel implementations for common operations. +/// +/// ilist_sentinel_traits implements a lazy dynamic sentinel allocation +/// strategy. The sentinel is stored in the prev field of ilist's Head. 
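+///
+/// A minimal usage sketch (illustrative only; MyNode is a hypothetical node
+/// type), showing that the sentinel is only allocated once the list is first
+/// asked for an iterator:
+///
+///   struct MyNode : public llvm::ilist_node<MyNode> {
+///     int Value;
+///     explicit MyNode(int V = 0) : Value(V) {}
+///   };
+///
+///   llvm::iplist<MyNode> List;       // Head is null; no sentinel yet
+///   List.push_back(new MyNode(1));   // end() creates the sentinel lazily
+///   List.push_back(new MyNode(2));
+///   for (llvm::iplist<MyNode>::iterator I = List.begin(), E = List.end();
+///        I != E; ++I)
+///     I->Value += 10;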
+/// +template<typename NodeTy> +struct ilist_sentinel_traits { + /// createSentinel - create the dynamic sentinel + static NodeTy *createSentinel() { return new NodeTy(); } + + /// destroySentinel - deallocate the dynamic sentinel + static void destroySentinel(NodeTy *N) { delete N; } + + /// provideInitialHead - when constructing an ilist, provide a starting + /// value for its Head + /// @return null node to indicate that it needs to be allocated later + static NodeTy *provideInitialHead() { return 0; } + + /// ensureHead - make sure that Head is either already + /// initialized or assigned a fresh sentinel + /// @return the sentinel + static NodeTy *ensureHead(NodeTy *&Head) { + if (!Head) { + Head = ilist_traits<NodeTy>::createSentinel(); + ilist_traits<NodeTy>::noteHead(Head, Head); + ilist_traits<NodeTy>::setNext(Head, 0); + return Head; + } + return ilist_traits<NodeTy>::getPrev(Head); + } + + /// noteHead - stash the sentinel into its default location + static void noteHead(NodeTy *NewHead, NodeTy *Sentinel) { + ilist_traits<NodeTy>::setPrev(NewHead, Sentinel); + } +}; + +/// ilist_node_traits - A fragment for template traits for intrusive list +/// that provides default node related operations. +/// +template<typename NodeTy> +struct ilist_node_traits { + static NodeTy *createNode(const NodeTy &V) { return new NodeTy(V); } + static void deleteNode(NodeTy *V) { delete V; } + + void addNodeToList(NodeTy *) {} + void removeNodeFromList(NodeTy *) {} + void transferNodesFromList(ilist_node_traits & /*SrcTraits*/, + ilist_iterator<NodeTy> /*first*/, + ilist_iterator<NodeTy> /*last*/) {} +}; + +/// ilist_default_traits - Default template traits for intrusive list. +/// By inheriting from this, you can easily use default implementations +/// for all common operations. +/// +template<typename NodeTy> +struct ilist_default_traits : ilist_nextprev_traits<NodeTy>, + ilist_sentinel_traits<NodeTy>, + ilist_node_traits<NodeTy> { +}; + +// Template traits for intrusive list. By specializing this template class, you +// can change what next/prev fields are used to store the links... +template<typename NodeTy> +struct ilist_traits : ilist_default_traits<NodeTy> {}; + +// Const traits are the same as nonconst traits... +template<typename Ty> +struct ilist_traits<const Ty> : public ilist_traits<Ty> {}; + +//===----------------------------------------------------------------------===// +// ilist_iterator<Node> - Iterator for intrusive list. +// +template<typename NodeTy> +class ilist_iterator + : public bidirectional_iterator<NodeTy, ptrdiff_t> { + +public: + typedef ilist_traits<NodeTy> Traits; + typedef bidirectional_iterator<NodeTy, ptrdiff_t> super; + + typedef typename super::value_type value_type; + typedef typename super::difference_type difference_type; + typedef typename super::pointer pointer; + typedef typename super::reference reference; +private: + pointer NodePtr; + + // ilist_iterator is not a random-access iterator, but it has an + // implicit conversion to pointer-type, which is. Declare (but + // don't define) these functions as private to help catch + // accidental misuse. 
+ void operator[](difference_type) const; + void operator+(difference_type) const; + void operator-(difference_type) const; + void operator+=(difference_type) const; + void operator-=(difference_type) const; + template<class T> void operator<(T) const; + template<class T> void operator<=(T) const; + template<class T> void operator>(T) const; + template<class T> void operator>=(T) const; + template<class T> void operator-(T) const; +public: + + ilist_iterator(pointer NP) : NodePtr(NP) {} + ilist_iterator(reference NR) : NodePtr(&NR) {} + ilist_iterator() : NodePtr(0) {} + + // This is templated so that we can allow constructing a const iterator from + // a nonconst iterator... + template<class node_ty> + ilist_iterator(const ilist_iterator<node_ty> &RHS) + : NodePtr(RHS.getNodePtrUnchecked()) {} + + // This is templated so that we can allow assigning to a const iterator from + // a nonconst iterator... + template<class node_ty> + const ilist_iterator &operator=(const ilist_iterator<node_ty> &RHS) { + NodePtr = RHS.getNodePtrUnchecked(); + return *this; + } + + // Accessors... + operator pointer() const { + assert(Traits::getNext(NodePtr) != 0 && "Dereferencing end()!"); + return NodePtr; + } + + reference operator*() const { + assert(Traits::getNext(NodePtr) != 0 && "Dereferencing end()!"); + return *NodePtr; + } + pointer operator->() const { return &operator*(); } + + // Comparison operators + bool operator==(const ilist_iterator &RHS) const { + return NodePtr == RHS.NodePtr; + } + bool operator!=(const ilist_iterator &RHS) const { + return NodePtr != RHS.NodePtr; + } + + // Increment and decrement operators... + ilist_iterator &operator--() { // predecrement - Back up + NodePtr = Traits::getPrev(NodePtr); + assert(NodePtr && "--'d off the beginning of an ilist!"); + return *this; + } + ilist_iterator &operator++() { // preincrement - Advance + NodePtr = Traits::getNext(NodePtr); + assert(NodePtr && "++'d off the end of an ilist!"); + return *this; + } + ilist_iterator operator--(int) { // postdecrement operators... + ilist_iterator tmp = *this; + --*this; + return tmp; + } + ilist_iterator operator++(int) { // postincrement operators... + ilist_iterator tmp = *this; + ++*this; + return tmp; + } + + // Internal interface, do not use... + pointer getNodePtrUnchecked() const { return NodePtr; } +}; + +// do not implement. this is to catch errors when people try to use +// them as random access iterators +template<typename T> +void operator-(int, ilist_iterator<T>); +template<typename T> +void operator-(ilist_iterator<T>,int); + +template<typename T> +void operator+(int, ilist_iterator<T>); +template<typename T> +void operator+(ilist_iterator<T>,int); + +// operator!=/operator== - Allow mixed comparisons without dereferencing +// the iterator, which could very likely be pointing to end(). +template<typename T> +bool operator!=(const T* LHS, const ilist_iterator<const T> &RHS) { + return LHS != RHS.getNodePtrUnchecked(); +} +template<typename T> +bool operator==(const T* LHS, const ilist_iterator<const T> &RHS) { + return LHS == RHS.getNodePtrUnchecked(); +} +template<typename T> +bool operator!=(T* LHS, const ilist_iterator<T> &RHS) { + return LHS != RHS.getNodePtrUnchecked(); +} +template<typename T> +bool operator==(T* LHS, const ilist_iterator<T> &RHS) { + return LHS == RHS.getNodePtrUnchecked(); +} + + +// Allow ilist_iterators to convert into pointers to a node automatically when +// used by the dyn_cast, cast, isa mechanisms... 
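+//
+// For example (illustrative; in LLVM proper, BasicBlock::iterator is an
+// ilist_iterator over Instructions):
+//
+//   BasicBlock::iterator I = BB->begin();
+//   if (CallInst *CI = dyn_cast<CallInst>(I))  // iterator used directly
+//     CI->dump();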
+ +template<typename From> struct simplify_type; + +template<typename NodeTy> struct simplify_type<ilist_iterator<NodeTy> > { + typedef NodeTy* SimpleType; + + static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) { + return &*Node; + } +}; +template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > { + typedef NodeTy* SimpleType; + + static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) { + return &*Node; + } +}; + + +//===----------------------------------------------------------------------===// +// +/// iplist - The subset of list functionality that can safely be used on nodes +/// of polymorphic types, i.e. a heterogenous list with a common base class that +/// holds the next/prev pointers. The only state of the list itself is a single +/// pointer to the head of the list. +/// +/// This list can be in one of three interesting states: +/// 1. The list may be completely unconstructed. In this case, the head +/// pointer is null. When in this form, any query for an iterator (e.g. +/// begin() or end()) causes the list to transparently change to state #2. +/// 2. The list may be empty, but contain a sentinel for the end iterator. This +/// sentinel is created by the Traits::createSentinel method and is a link +/// in the list. When the list is empty, the pointer in the iplist points +/// to the sentinel. Once the sentinel is constructed, it +/// is not destroyed until the list is. +/// 3. The list may contain actual objects in it, which are stored as a doubly +/// linked list of nodes. One invariant of the list is that the predecessor +/// of the first node in the list always points to the last node in the list, +/// and the successor pointer for the sentinel (which always stays at the +/// end of the list) is always null. +/// +template<typename NodeTy, typename Traits=ilist_traits<NodeTy> > +class iplist : public Traits { + mutable NodeTy *Head; + + // Use the prev node pointer of 'head' as the tail pointer. This is really a + // circularly linked list where we snip the 'next' link from the sentinel node + // back to the first node in the list (to preserve assertions about going off + // the end of the list). + NodeTy *getTail() { return this->ensureHead(Head); } + const NodeTy *getTail() const { return this->ensureHead(Head); } + void setTail(NodeTy *N) const { this->noteHead(Head, N); } + + /// CreateLazySentinel - This method verifies whether the sentinel for the + /// list has been created and lazily makes it if not. + void CreateLazySentinel() const { + this->Traits::ensureHead(Head); + } + + static bool op_less(NodeTy &L, NodeTy &R) { return L < R; } + static bool op_equal(NodeTy &L, NodeTy &R) { return L == R; } + + // No fundamental reason why iplist can't by copyable, but the default + // copy/copy-assign won't do. 
+ iplist(const iplist &); // do not implement + void operator=(const iplist &); // do not implement + +public: + typedef NodeTy *pointer; + typedef const NodeTy *const_pointer; + typedef NodeTy &reference; + typedef const NodeTy &const_reference; + typedef NodeTy value_type; + typedef ilist_iterator<NodeTy> iterator; + typedef ilist_iterator<const NodeTy> const_iterator; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + + iplist() : Head(this->Traits::provideInitialHead()) {} + ~iplist() { + if (!Head) return; + clear(); + Traits::destroySentinel(getTail()); + } + + // Iterator creation methods. + iterator begin() { + CreateLazySentinel(); + return iterator(Head); + } + const_iterator begin() const { + CreateLazySentinel(); + return const_iterator(Head); + } + iterator end() { + CreateLazySentinel(); + return iterator(getTail()); + } + const_iterator end() const { + CreateLazySentinel(); + return const_iterator(getTail()); + } + + // reverse iterator creation methods. + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin());} + + + // Miscellaneous inspection routines. + size_type max_size() const { return size_type(-1); } + bool empty() const { return Head == 0 || Head == getTail(); } + + // Front and back accessor functions... + reference front() { + assert(!empty() && "Called front() on empty list!"); + return *Head; + } + const_reference front() const { + assert(!empty() && "Called front() on empty list!"); + return *Head; + } + reference back() { + assert(!empty() && "Called back() on empty list!"); + return *this->getPrev(getTail()); + } + const_reference back() const { + assert(!empty() && "Called back() on empty list!"); + return *this->getPrev(getTail()); + } + + void swap(iplist &RHS) { + assert(0 && "Swap does not use list traits callback correctly yet!"); + std::swap(Head, RHS.Head); + } + + iterator insert(iterator where, NodeTy *New) { + NodeTy *CurNode = where.getNodePtrUnchecked(); + NodeTy *PrevNode = this->getPrev(CurNode); + this->setNext(New, CurNode); + this->setPrev(New, PrevNode); + + if (CurNode != Head) // Is PrevNode off the beginning of the list? + this->setNext(PrevNode, New); + else + Head = New; + this->setPrev(CurNode, New); + + this->addNodeToList(New); // Notify traits that we added a node... + return New; + } + + iterator insertAfter(iterator where, NodeTy *New) { + if (empty()) + return insert(begin(), New); + else + return insert(++where, New); + } + + NodeTy *remove(iterator &IT) { + assert(IT != end() && "Cannot remove end of list!"); + NodeTy *Node = &*IT; + NodeTy *NextNode = this->getNext(Node); + NodeTy *PrevNode = this->getPrev(Node); + + if (Node != Head) // Is PrevNode off the beginning of the list? + this->setNext(PrevNode, NextNode); + else + Head = NextNode; + this->setPrev(NextNode, PrevNode); + IT = NextNode; + this->removeNodeFromList(Node); // Notify traits that we removed a node... + + // Set the next/prev pointers of the current node to null. This isn't + // strictly required, but this catches errors where a node is removed from + // an ilist (and potentially deleted) with iterators still pointing at it. 
+ // When those iterators are incremented or decremented, they will assert on + // the null next/prev pointer instead of "usually working". + this->setNext(Node, 0); + this->setPrev(Node, 0); + return Node; + } + + NodeTy *remove(const iterator &IT) { + iterator MutIt = IT; + return remove(MutIt); + } + + // erase - remove a node from the controlled sequence... and delete it. + iterator erase(iterator where) { + this->deleteNode(remove(where)); + return where; + } + + +private: + // transfer - The heart of the splice function. Move linked list nodes from + // [first, last) into position. + // + void transfer(iterator position, iplist &L2, iterator first, iterator last) { + assert(first != last && "Should be checked by callers"); + + if (position != last) { + // Note: we have to be careful about the case when we move the first node + // in the list. This node is the list sentinel node and we can't move it. + NodeTy *ThisSentinel = getTail(); + setTail(0); + NodeTy *L2Sentinel = L2.getTail(); + L2.setTail(0); + + // Remove [first, last) from its old position. + NodeTy *First = &*first, *Prev = getPrev(First); + NodeTy *Next = last.getNodePtrUnchecked(), *Last = getPrev(Next); + if (Prev) + this->setNext(Prev, Next); + else + L2.Head = Next; + this->setPrev(Next, Prev); + + // Splice [first, last) into its new position. + NodeTy *PosNext = position.getNodePtrUnchecked(); + NodeTy *PosPrev = getPrev(PosNext); + + // Fix head of list... + if (PosPrev) + this->setNext(PosPrev, First); + else + Head = First; + this->setPrev(First, PosPrev); + + // Fix end of list... + this->setNext(Last, PosNext); + this->setPrev(PosNext, Last); + + transferNodesFromList(L2, First, PosNext); + + // Now that everything is set, restore the pointers to the list sentinels. + L2.setTail(L2Sentinel); + setTail(ThisSentinel); + } + } + +public: + + //===----------------------------------------------------------------------=== + // Functionality derived from other functions defined above... + // + + size_type size() const { + if (Head == 0) return 0; // Don't require construction of sentinel if empty. + return std::distance(begin(), end()); + } + + iterator erase(iterator first, iterator last) { + while (first != last) + first = erase(first); + return last; + } + + void clear() { if (Head) erase(begin(), end()); } + + // Front and back inserters... + void push_front(NodeTy *val) { insert(begin(), val); } + void push_back(NodeTy *val) { insert(end(), val); } + void pop_front() { + assert(!empty() && "pop_front() on empty list!"); + erase(begin()); + } + void pop_back() { + assert(!empty() && "pop_back() on empty list!"); + iterator t = end(); erase(--t); + } + + // Special forms of insert... + template<class InIt> void insert(iterator where, InIt first, InIt last) { + for (; first != last; ++first) insert(where, *first); + } + + // Splice members - defined in terms of transfer... 
+ void splice(iterator where, iplist &L2) { + if (!L2.empty()) + transfer(where, L2, L2.begin(), L2.end()); + } + void splice(iterator where, iplist &L2, iterator first) { + iterator last = first; ++last; + if (where == first || where == last) return; // No change + transfer(where, L2, first, last); + } + void splice(iterator where, iplist &L2, iterator first, iterator last) { + if (first != last) transfer(where, L2, first, last); + } + + + + //===----------------------------------------------------------------------=== + // High-Level Functionality that shouldn't really be here, but is part of list + // + + // These two functions are actually called remove/remove_if in list<>, but + // they actually do the job of erase, rename them accordingly. + // + void erase(const NodeTy &val) { + for (iterator I = begin(), E = end(); I != E; ) { + iterator next = I; ++next; + if (*I == val) erase(I); + I = next; + } + } + template<class Pr1> void erase_if(Pr1 pred) { + for (iterator I = begin(), E = end(); I != E; ) { + iterator next = I; ++next; + if (pred(*I)) erase(I); + I = next; + } + } + + template<class Pr2> void unique(Pr2 pred) { + if (empty()) return; + for (iterator I = begin(), E = end(), Next = begin(); ++Next != E;) { + if (pred(*I)) + erase(Next); + else + I = Next; + Next = I; + } + } + void unique() { unique(op_equal); } + + template<class Pr3> void merge(iplist &right, Pr3 pred) { + iterator first1 = begin(), last1 = end(); + iterator first2 = right.begin(), last2 = right.end(); + while (first1 != last1 && first2 != last2) + if (pred(*first2, *first1)) { + iterator next = first2; + transfer(first1, right, first2, ++next); + first2 = next; + } else { + ++first1; + } + if (first2 != last2) transfer(last1, right, first2, last2); + } + void merge(iplist &right) { return merge(right, op_less); } + + template<class Pr3> void sort(Pr3 pred); + void sort() { sort(op_less); } + void reverse(); +}; + + +template<typename NodeTy> +struct ilist : public iplist<NodeTy> { + typedef typename iplist<NodeTy>::size_type size_type; + typedef typename iplist<NodeTy>::iterator iterator; + + ilist() {} + ilist(const ilist &right) { + insert(this->begin(), right.begin(), right.end()); + } + explicit ilist(size_type count) { + insert(this->begin(), count, NodeTy()); + } + ilist(size_type count, const NodeTy &val) { + insert(this->begin(), count, val); + } + template<class InIt> ilist(InIt first, InIt last) { + insert(this->begin(), first, last); + } + + // bring hidden functions into scope + using iplist<NodeTy>::insert; + using iplist<NodeTy>::push_front; + using iplist<NodeTy>::push_back; + + // Main implementation here - Insert for a node passed by value... + iterator insert(iterator where, const NodeTy &val) { + return insert(where, createNode(val)); + } + + + // Front and back inserters... + void push_front(const NodeTy &val) { insert(this->begin(), val); } + void push_back(const NodeTy &val) { insert(this->end(), val); } + + // Special forms of insert... + template<class InIt> void insert(iterator where, InIt first, InIt last) { + for (; first != last; ++first) insert(where, *first); + } + void insert(iterator where, size_type count, const NodeTy &val) { + for (; count != 0; --count) insert(where, val); + } + + // Assign special forms... 
+ void assign(size_type count, const NodeTy &val) { + iterator I = this->begin(); + for (; I != this->end() && count != 0; ++I, --count) + *I = val; + if (count != 0) + insert(this->end(), val, val); + else + erase(I, this->end()); + } + template<class InIt> void assign(InIt first1, InIt last1) { + iterator first2 = this->begin(), last2 = this->end(); + for ( ; first1 != last1 && first2 != last2; ++first1, ++first2) + *first1 = *first2; + if (first2 == last2) + erase(first1, last1); + else + insert(last1, first2, last2); + } + + + // Resize members... + void resize(size_type newsize, NodeTy val) { + iterator i = this->begin(); + size_type len = 0; + for ( ; i != this->end() && len < newsize; ++i, ++len) /* empty*/ ; + + if (len == newsize) + erase(i, this->end()); + else // i == end() + insert(this->end(), newsize - len, val); + } + void resize(size_type newsize) { resize(newsize, NodeTy()); } +}; + +} // End llvm namespace + +namespace std { + // Ensure that swap uses the fast list swap... + template<class Ty> + void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) { + Left.swap(Right); + } +} // End 'std' extensions... + +#endif // LLVM_ADT_ILIST_H diff --git a/include/llvm/ADT/ilist_node.h b/include/llvm/ADT/ilist_node.h new file mode 100644 index 0000000..dae7475 --- /dev/null +++ b/include/llvm/ADT/ilist_node.h @@ -0,0 +1,47 @@ +//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ilist_node class template, which is a convenient +// base class for creating classes that can be used with ilists. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_ILIST_NODE_H +#define LLVM_ADT_ILIST_NODE_H + +namespace llvm { + +template<typename NodeTy> +struct ilist_nextprev_traits; + +template<typename NodeTy> +struct ilist_traits; + +/// ilist_node - Base class that provides next/prev services for nodes +/// that use ilist_nextprev_traits or ilist_default_traits. +/// +template<typename NodeTy> +class ilist_node { +private: + friend struct ilist_nextprev_traits<NodeTy>; + friend struct ilist_traits<NodeTy>; + NodeTy *Prev, *Next; + NodeTy *getPrev() { return Prev; } + NodeTy *getNext() { return Next; } + const NodeTy *getPrev() const { return Prev; } + const NodeTy *getNext() const { return Next; } + void setPrev(NodeTy *N) { Prev = N; } + void setNext(NodeTy *N) { Next = N; } +protected: + ilist_node() : Prev(0), Next(0) {} +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ADT/iterator.cmake b/include/llvm/ADT/iterator.cmake new file mode 100644 index 0000000..55df8ce --- /dev/null +++ b/include/llvm/ADT/iterator.cmake @@ -0,0 +1,79 @@ +//===-- llvm/ADT/iterator - Portable wrapper around <iterator> --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides a wrapper around the mysterious <iterator> header file. +// In GCC 2.95.3, the file defines a bidirectional_iterator class (and other +// friends), instead of the standard iterator class. 
In GCC 3.1, the +// bidirectional_iterator class got moved out and the new, standards compliant, +// iterator<> class was added. Because there is nothing that we can do to get +// correct behavior on both compilers, we have this header with #ifdef's. Gross +// huh? +// +// By #includ'ing this file, you get the contents of <iterator> plus the +// following classes in the global namespace: +// +// 1. bidirectional_iterator +// 2. forward_iterator +// +// The #if directives' expressions are filled in by Autoconf. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_ITERATOR +#define LLVM_ADT_ITERATOR + +#include <iterator> + +#undef HAVE_BI_ITERATOR +#undef HAVE_STD_ITERATOR +#undef HAVE_FWD_ITERATOR + +// defined by Kevin +#define HAVE_STD_ITERATOR 1 + +#ifdef _MSC_VER +# define HAVE_BI_ITERATOR 0 +# define HAVE_STD_ITERATOR 1 +# define HAVE_FWD_ITERATOR 0 +#endif + +#if !HAVE_BI_ITERATOR +# if HAVE_STD_ITERATOR +/// If the bidirectional iterator is not defined, we attempt to define it in +/// terms of the C++ standard iterator. Otherwise, we import it with a "using" +/// statement. +/// +template<class Ty, class PtrDiffTy> +struct bidirectional_iterator + : public std::iterator<std::bidirectional_iterator_tag, Ty, PtrDiffTy> { +}; +# else +# error "Need to have standard iterator to define bidirectional iterator!" +# endif +#else +using std::bidirectional_iterator; +#endif + +#if !HAVE_FWD_ITERATOR +# if HAVE_STD_ITERATOR +/// If the forward iterator is not defined, attempt to define it in terms of +/// the C++ standard iterator. Otherwise, we import it with a "using" statement. +/// +template<class Ty, class PtrDiffTy> +struct forward_iterator + : public std::iterator<std::forward_iterator_tag, Ty, PtrDiffTy> { +}; +# else +# error "Need to have standard iterator to define forward iterator!" +# endif +#else +using std::forward_iterator; +#endif + +#endif diff --git a/include/llvm/ADT/iterator.h.in b/include/llvm/ADT/iterator.h.in new file mode 100644 index 0000000..dce7462 --- /dev/null +++ b/include/llvm/ADT/iterator.h.in @@ -0,0 +1,76 @@ +//==-- llvm/ADT/iterator.h - Portable wrapper around <iterator> --*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides a wrapper around the mysterious <iterator> header file. +// In GCC 2.95.3, the file defines a bidirectional_iterator class (and other +// friends), instead of the standard iterator class. In GCC 3.1, the +// bidirectional_iterator class got moved out and the new, standards compliant, +// iterator<> class was added. Because there is nothing that we can do to get +// correct behavior on both compilers, we have this header with #ifdef's. Gross +// huh? +// +// By #includ'ing this file, you get the contents of <iterator> plus the +// following classes in the global namespace: +// +// 1. bidirectional_iterator +// 2. forward_iterator +// +// The #if directives' expressions are filled in by Autoconf. 
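+//
+// Client code then uses the resulting class templates directly. For example
+// (illustrative), ilist_iterator in llvm/ADT/ilist.h is declared as:
+//
+//   template<typename NodeTy>
+//   class ilist_iterator
+//     : public bidirectional_iterator<NodeTy, ptrdiff_t> { ... };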
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_ITERATOR_H +#define LLVM_ADT_ITERATOR_H + +#include <iterator> + +#undef HAVE_BI_ITERATOR +#undef HAVE_STD_ITERATOR +#undef HAVE_FWD_ITERATOR + +#ifdef _MSC_VER +# define HAVE_BI_ITERATOR 0 +# define HAVE_STD_ITERATOR 1 +# define HAVE_FWD_ITERATOR 0 +#endif + +#if !HAVE_BI_ITERATOR +# if HAVE_STD_ITERATOR +/// If the bidirectional iterator is not defined, we attempt to define it in +/// terms of the C++ standard iterator. Otherwise, we import it with a "using" +/// statement. +/// +template<class Ty, class PtrDiffTy> +struct bidirectional_iterator + : public std::iterator<std::bidirectional_iterator_tag, Ty, PtrDiffTy> { +}; +# else +# error "Need to have standard iterator to define bidirectional iterator!" +# endif +#else +using std::bidirectional_iterator; +#endif + +#if !HAVE_FWD_ITERATOR +# if HAVE_STD_ITERATOR +/// If the forward iterator is not defined, attempt to define it in terms of +/// the C++ standard iterator. Otherwise, we import it with a "using" statement. +/// +template<class Ty, class PtrDiffTy> +struct forward_iterator + : public std::iterator<std::forward_iterator_tag, Ty, PtrDiffTy> { +}; +# else +# error "Need to have standard iterator to define forward iterator!" +# endif +#else +using std::forward_iterator; +#endif + +#endif // LLVM_ADT_ITERATOR_H diff --git a/include/llvm/AbstractTypeUser.h b/include/llvm/AbstractTypeUser.h new file mode 100644 index 0000000..c1216ba --- /dev/null +++ b/include/llvm/AbstractTypeUser.h @@ -0,0 +1,197 @@ +//===-- llvm/AbstractTypeUser.h - AbstractTypeUser Interface ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the AbstractTypeUser class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ABSTRACT_TYPE_USER_H +#define LLVM_ABSTRACT_TYPE_USER_H + +#if !defined(LLVM_TYPE_H) && !defined(LLVM_VALUE_H) +#error Do not include this file directly. Include Type.h instead. +#error Some versions of GCC (e.g. 3.4 and 4.1) can not handle the inlined method +#error PATypeHolder::dropRef() correctly otherwise. +#endif + +// This is the "master" include for <cassert> Whether this file needs it or not, +// it must always include <cassert> for the files which include +// llvm/AbstractTypeUser.h +// +// In this way, most every LLVM source file will have access to the assert() +// macro without having to #include <cassert> directly. +// +#include <cassert> + +namespace llvm { + +class Type; +class DerivedType; +template<typename T> struct simplify_type; + +/// The AbstractTypeUser class is an interface to be implemented by classes who +/// could possibly use an abstract type. Abstract types are denoted by the +/// isAbstract flag set to true in the Type class. These are classes that +/// contain an Opaque type in their structure somewhere. +/// +/// Classes must implement this interface so that they may be notified when an +/// abstract type is resolved. Abstract types may be resolved into more +/// concrete types through: linking, parsing, and bitcode reading. When this +/// happens, all of the users of the type must be updated to reference the new, +/// more concrete type. They are notified through the AbstractTypeUser +/// interface. 
+/// +/// In addition to this, AbstractTypeUsers must keep the use list of the +/// potentially abstract type that they reference up-to-date. To do this in a +/// nice, transparent way, the PATypeHandle class is used to hold "Potentially +/// Abstract Types", and keep the use list of the abstract types up-to-date. +/// @brief LLVM Abstract Type User Representation +class AbstractTypeUser { +protected: + virtual ~AbstractTypeUser(); // Derive from me +public: + + /// refineAbstractType - The callback method invoked when an abstract type is + /// resolved to another type. An object must override this method to update + /// its internal state to reference NewType instead of OldType. + /// + virtual void refineAbstractType(const DerivedType *OldTy, + const Type *NewTy) = 0; + + /// The other case which AbstractTypeUsers must be aware of is when a type + /// makes the transition from being abstract (where it has clients on it's + /// AbstractTypeUsers list) to concrete (where it does not). This method + /// notifies ATU's when this occurs for a type. + /// + virtual void typeBecameConcrete(const DerivedType *AbsTy) = 0; + + // for debugging... + virtual void dump() const = 0; +}; + + +/// PATypeHandle - Handle to a Type subclass. This class is used to keep the +/// use list of abstract types up-to-date. +/// +class PATypeHandle { + const Type *Ty; + AbstractTypeUser * const User; + + // These functions are defined at the bottom of Type.h. See the comment there + // for justification. + void addUser(); + void removeUser(); +public: + // ctor - Add use to type if abstract. Note that Ty must not be null + inline PATypeHandle(const Type *ty, AbstractTypeUser *user) + : Ty(ty), User(user) { + addUser(); + } + + // ctor - Add use to type if abstract. + inline PATypeHandle(const PATypeHandle &T) : Ty(T.Ty), User(T.User) { + addUser(); + } + + // dtor - Remove reference to type... + inline ~PATypeHandle() { removeUser(); } + + // Automatic casting operator so that the handle may be used naturally + inline operator Type *() const { return const_cast<Type*>(Ty); } + inline Type *get() const { return const_cast<Type*>(Ty); } + + // operator= - Allow assignment to handle + inline Type *operator=(const Type *ty) { + if (Ty != ty) { // Ensure we don't accidentally drop last ref to Ty + removeUser(); + Ty = ty; + addUser(); + } + return get(); + } + + // operator= - Allow assignment to handle + inline const Type *operator=(const PATypeHandle &T) { + return operator=(T.Ty); + } + + inline bool operator==(const Type *ty) { + return Ty == ty; + } + + // operator-> - Allow user to dereference handle naturally... + inline const Type *operator->() const { return Ty; } +}; + + +/// PATypeHolder - Holder class for a potentially abstract type. This uses +/// efficient union-find techniques to handle dynamic type resolution. Unless +/// you need to do custom processing when types are resolved, you should always +/// use PATypeHolders in preference to PATypeHandles. +/// +class PATypeHolder { + mutable const Type *Ty; + void destroy(); +public: + PATypeHolder(const Type *ty) : Ty(ty) { + addRef(); + } + PATypeHolder(const PATypeHolder &T) : Ty(T.Ty) { + addRef(); + } + + ~PATypeHolder() { if (Ty) dropRef(); } + + operator Type *() const { return get(); } + Type *get() const; + + // operator-> - Allow user to dereference handle naturally... 
+ Type *operator->() const { return get(); } + + // operator= - Allow assignment to handle + Type *operator=(const Type *ty) { + if (Ty != ty) { // Don't accidentally drop last ref to Ty. + dropRef(); + Ty = ty; + addRef(); + } + return get(); + } + Type *operator=(const PATypeHolder &H) { + return operator=(H.Ty); + } + + /// getRawType - This should only be used to implement the vmcore library. + /// + const Type *getRawType() const { return Ty; } + +private: + void addRef(); + void dropRef(); + friend class TypeMapBase; +}; + +// simplify_type - Allow clients to treat uses just like values when using +// casting operators. +template<> struct simplify_type<PATypeHolder> { + typedef const Type* SimpleType; + static SimpleType getSimplifiedValue(const PATypeHolder &Val) { + return static_cast<SimpleType>(Val.get()); + } +}; +template<> struct simplify_type<const PATypeHolder> { + typedef const Type* SimpleType; + static SimpleType getSimplifiedValue(const PATypeHolder &Val) { + return static_cast<SimpleType>(Val.get()); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h new file mode 100644 index 0000000..ba040e1 --- /dev/null +++ b/include/llvm/Analysis/AliasAnalysis.h @@ -0,0 +1,363 @@ +//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the generic AliasAnalysis interface, which is used as the +// common interface used by all clients of alias analysis information, and +// implemented by all alias analysis implementations. Mod/Ref information is +// also captured by this interface. +// +// Implementations of this interface must implement the various virtual methods, +// which automatically provides functionality for the entire suite of client +// APIs. +// +// This API represents memory as a (Pointer, Size) pair. The Pointer component +// specifies the base memory address of the region, the Size specifies how large +// of an area is being queried. If Size is 0, two pointers only alias if they +// are exactly equal. If size is greater than zero, but small, the two pointers +// alias if the areas pointed to overlap. If the size is very large (ie, ~0U), +// then the two pointers alias if they may be pointing to components of the same +// memory object. Pointers that point to two completely different objects in +// memory never alias, regardless of the value of the Size component. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_ALIAS_ANALYSIS_H +#define LLVM_ANALYSIS_ALIAS_ANALYSIS_H + +#include "llvm/Support/CallSite.h" +#include "llvm/System/IncludeFile.h" +#include <vector> + +namespace llvm { + +class LoadInst; +class StoreInst; +class VAArgInst; +class TargetData; +class Pass; +class AnalysisUsage; + +class AliasAnalysis { +protected: + const TargetData *TD; + AliasAnalysis *AA; // Previous Alias Analysis to chain to. + + /// InitializeAliasAnalysis - Subclasses must call this method to initialize + /// the AliasAnalysis interface before any other methods are called. This is + /// typically called by the run* methods of these subclasses. This may be + /// called multiple times. 
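A minimal sketch of the PATypeHolder usage recommended above; the struct and its names are hypothetical, and it assumes the holder is reached through llvm/Type.h as this header requires.

#include "llvm/Type.h"   // pulls in AbstractTypeUser.h and PATypeHolder

// Hypothetical record that must keep referring to a type which may still be
// abstract; if the opaque type is later resolved, the holder updates itself.
struct PendingField {
  llvm::PATypeHolder FieldTy;
  explicit PendingField(const llvm::Type *T) : FieldTy(T) {}
  const llvm::Type *getType() const { return FieldTy.get(); }
};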
+ /// + void InitializeAliasAnalysis(Pass *P); + + /// getAnalysisUsage - All alias analysis implementations should invoke this + /// directly (using AliasAnalysis::getAnalysisUsage(AU)) to make sure that + /// TargetData is required by the pass. + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + +public: + static char ID; // Class identification, replacement for typeinfo + AliasAnalysis() : TD(0), AA(0) {} + virtual ~AliasAnalysis(); // We want to be subclassed + + /// getTargetData - Every alias analysis implementation depends on the size of + /// data items in the current Target. This provides a uniform way to handle + /// it. + /// + const TargetData &getTargetData() const { return *TD; } + + //===--------------------------------------------------------------------===// + /// Alias Queries... + /// + + /// Alias analysis result - Either we know for sure that it does not alias, we + /// know for sure it must alias, or we don't know anything: The two pointers + /// _might_ alias. This enum is designed so you can do things like: + /// if (AA.alias(P1, P2)) { ... } + /// to check to see if two pointers might alias. + /// + enum AliasResult { NoAlias = 0, MayAlias = 1, MustAlias = 2 }; + + /// alias - The main low level interface to the alias analysis implementation. + /// Returns a Result indicating whether the two pointers are aliased to each + /// other. This is the interface that must be implemented by specific alias + /// analysis implementations. + /// + virtual AliasResult alias(const Value *V1, unsigned V1Size, + const Value *V2, unsigned V2Size); + + /// getMustAliases - If there are any pointers known that must alias this + /// pointer, return them now. This allows alias-set based alias analyses to + /// perform a form a value numbering (which is exposed by load-vn). If an + /// alias analysis supports this, it should ADD any must aliased pointers to + /// the specified vector. + /// + virtual void getMustAliases(Value *P, std::vector<Value*> &RetVals); + + /// pointsToConstantMemory - If the specified pointer is known to point into + /// constant global memory, return true. This allows disambiguation of store + /// instructions from constant pointers. + /// + virtual bool pointsToConstantMemory(const Value *P); + + //===--------------------------------------------------------------------===// + /// Simple mod/ref information... + /// + + /// ModRefResult - Represent the result of a mod/ref query. Mod and Ref are + /// bits which may be or'd together. + /// + enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 }; + + + /// ModRefBehavior - Summary of how a function affects memory in the program. + /// Loads from constant globals are not considered memory accesses for this + /// interface. Also, functions may freely modify stack space local to their + /// invocation without having to report it through these interfaces. + enum ModRefBehavior { + // DoesNotAccessMemory - This function does not perform any non-local loads + // or stores to memory. + // + // This property corresponds to the GCC 'const' attribute. + DoesNotAccessMemory, + + // AccessesArguments - This function accesses function arguments in well + // known (possibly volatile) ways, but does not access any other memory. + // + // Clients may use the Info parameter of getModRefBehavior to get specific + // information about how pointer arguments are used. 
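A minimal sketch of the alias query declared above; the helper is hypothetical and assumes an already-initialized AliasAnalysis reference (for example obtained with getAnalysis<AliasAnalysis>() inside a pass) plus two pointer values with their access sizes in bytes.

#include "llvm/Analysis/AliasAnalysis.h"

// Hypothetical helper: classify the relationship between two memory accesses.
void classify(llvm::AliasAnalysis &AA, const llvm::Value *P1, unsigned S1,
              const llvm::Value *P2, unsigned S2) {
  switch (AA.alias(P1, S1, P2, S2)) {
  case llvm::AliasAnalysis::NoAlias:   /* provably disjoint accesses    */ break;
  case llvm::AliasAnalysis::MustAlias: /* exactly the same location     */ break;
  case llvm::AliasAnalysis::MayAlias:  /* unknown: must be conservative */ break;
  }
}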
+ AccessesArguments, + + // AccessesArgumentsAndGlobals - This function has accesses function + // arguments and global variables well known (possibly volatile) ways, but + // does not access any other memory. + // + // Clients may use the Info parameter of getModRefBehavior to get specific + // information about how pointer arguments are used. + AccessesArgumentsAndGlobals, + + // OnlyReadsMemory - This function does not perform any non-local stores or + // volatile loads, but may read from any memory location. + // + // This property corresponds to the GCC 'pure' attribute. + OnlyReadsMemory, + + // UnknownModRefBehavior - This indicates that the function could not be + // classified into one of the behaviors above. + UnknownModRefBehavior + }; + + /// PointerAccessInfo - This struct is used to return results for pointers, + /// globals, and the return value of a function. + struct PointerAccessInfo { + /// V - The value this record corresponds to. This may be an Argument for + /// the function, a GlobalVariable, or null, corresponding to the return + /// value for the function. + Value *V; + + /// ModRefInfo - Whether the pointer is loaded or stored to/from. + /// + ModRefResult ModRefInfo; + + /// AccessType - Specific fine-grained access information for the argument. + /// If none of these classifications is general enough, the + /// getModRefBehavior method should not return AccessesArguments*. If a + /// record is not returned for a particular argument, the argument is never + /// dead and never dereferenced. + enum AccessType { + /// ScalarAccess - The pointer is dereferenced. + /// + ScalarAccess, + + /// ArrayAccess - The pointer is indexed through as an array of elements. + /// + ArrayAccess, + + /// ElementAccess ?? P->F only? + + /// CallsThrough - Indirect calls are made through the specified function + /// pointer. + CallsThrough + }; + }; + + /// getModRefBehavior - Return the behavior when calling the given call site. + virtual ModRefBehavior getModRefBehavior(CallSite CS, + std::vector<PointerAccessInfo> *Info = 0); + + /// getModRefBehavior - Return the behavior when calling the given function. + /// For use when the call site is not known. + virtual ModRefBehavior getModRefBehavior(Function *F, + std::vector<PointerAccessInfo> *Info = 0); + + /// doesNotAccessMemory - If the specified call is known to never read or + /// write memory, return true. If the call only reads from known-constant + /// memory, it is also legal to return true. Calls that unwind the stack + /// are legal for this predicate. + /// + /// Many optimizations (such as CSE and LICM) can be performed on such calls + /// without worrying about aliasing properties, and many calls have this + /// property (e.g. calls to 'sin' and 'cos'). + /// + /// This property corresponds to the GCC 'const' attribute. + /// + bool doesNotAccessMemory(CallSite CS) { + return getModRefBehavior(CS) == DoesNotAccessMemory; + } + + /// doesNotAccessMemory - If the specified function is known to never read or + /// write memory, return true. For use when the call site is not known. + /// + bool doesNotAccessMemory(Function *F) { + return getModRefBehavior(F) == DoesNotAccessMemory; + } + + /// onlyReadsMemory - If the specified call is known to only read from + /// non-volatile memory (or not access memory at all), return true. Calls + /// that unwind the stack are legal for this predicate. 
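A short sketch of how these behavior summaries are typically consumed; the helper is hypothetical and assumes CS is a CallSite for some call or invoke instruction.

// Hypothetical helper: is this call safe to hoist or CSE as far as memory goes?
bool isMemorySafeToMove(llvm::AliasAnalysis &AA, llvm::CallSite CS) {
  if (AA.doesNotAccessMemory(CS))
    return true;                  // 'const'-like: no loads, no stores
  if (AA.onlyReadsMemory(CS))
    return true;                  // 'pure'-like: fine if no stores intervene
  return false;
}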
+ /// + /// This property allows many common optimizations to be performed in the + /// absence of interfering store instructions, such as CSE of strlen calls. + /// + /// This property corresponds to the GCC 'pure' attribute. + /// + bool onlyReadsMemory(CallSite CS) { + ModRefBehavior MRB = getModRefBehavior(CS); + return MRB == DoesNotAccessMemory || MRB == OnlyReadsMemory; + } + + /// onlyReadsMemory - If the specified function is known to only read from + /// non-volatile memory (or not access memory at all), return true. For use + /// when the call site is not known. + /// + bool onlyReadsMemory(Function *F) { + ModRefBehavior MRB = getModRefBehavior(F); + return MRB == DoesNotAccessMemory || MRB == OnlyReadsMemory; + } + + + /// getModRefInfo - Return information about whether or not an instruction may + /// read or write memory specified by the pointer operand. An instruction + /// that doesn't read or write memory may be trivially LICM'd for example. + + /// getModRefInfo (for call sites) - Return whether information about whether + /// a particular call site modifies or reads the memory specified by the + /// pointer. + /// + virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size); + + /// getModRefInfo - Return information about whether two call sites may refer + /// to the same set of memory locations. This function returns NoModRef if + /// the two calls refer to disjoint memory locations, Ref if CS1 reads memory + /// written by CS2, Mod if CS1 writes to memory read or written by CS2, or + /// ModRef if CS1 might read or write memory accessed by CS2. + /// + virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2); + + /// hasNoModRefInfoForCalls - Return true if the analysis has no mod/ref + /// information for pairs of function calls (other than "pure" and "const" + /// functions). This can be used by clients to avoid many pointless queries. + /// Remember that if you override this and chain to another analysis, you must + /// make sure that it doesn't have mod/ref info either. + /// + virtual bool hasNoModRefInfoForCalls() const; + +public: + /// Convenience functions... + ModRefResult getModRefInfo(LoadInst *L, Value *P, unsigned Size); + ModRefResult getModRefInfo(StoreInst *S, Value *P, unsigned Size); + ModRefResult getModRefInfo(CallInst *C, Value *P, unsigned Size) { + return getModRefInfo(CallSite(C), P, Size); + } + ModRefResult getModRefInfo(InvokeInst *I, Value *P, unsigned Size) { + return getModRefInfo(CallSite(I), P, Size); + } + ModRefResult getModRefInfo(VAArgInst* I, Value* P, unsigned Size) { + return AliasAnalysis::ModRef; + } + ModRefResult getModRefInfo(Instruction *I, Value *P, unsigned Size) { + switch (I->getOpcode()) { + case Instruction::VAArg: return getModRefInfo((VAArgInst*)I, P, Size); + case Instruction::Load: return getModRefInfo((LoadInst*)I, P, Size); + case Instruction::Store: return getModRefInfo((StoreInst*)I, P, Size); + case Instruction::Call: return getModRefInfo((CallInst*)I, P, Size); + case Instruction::Invoke: return getModRefInfo((InvokeInst*)I, P, Size); + default: return NoModRef; + } + } + + //===--------------------------------------------------------------------===// + /// Higher level methods for querying mod/ref information. + /// + + /// canBasicBlockModify - Return true if it is possible for execution of the + /// specified basic block to modify the value pointed to by Ptr. 
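A minimal sketch of a mod/ref query against a single instruction, using the convenience overloads declared above; the helper name is hypothetical.

// Hypothetical helper: may instruction I write the location (Ptr, Size)?
bool mayWriteTo(llvm::AliasAnalysis &AA, llvm::Instruction *I,
                llvm::Value *Ptr, unsigned Size) {
  return (AA.getModRefInfo(I, Ptr, Size) & llvm::AliasAnalysis::Mod) != 0;
}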
+ /// + bool canBasicBlockModify(const BasicBlock &BB, const Value *P, unsigned Size); + + /// canInstructionRangeModify - Return true if it is possible for the + /// execution of the specified instructions to modify the value pointed to by + /// Ptr. The instructions to consider are all of the instructions in the + /// range of [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block. + /// + bool canInstructionRangeModify(const Instruction &I1, const Instruction &I2, + const Value *Ptr, unsigned Size); + + //===--------------------------------------------------------------------===// + /// Methods that clients should call when they transform the program to allow + /// alias analyses to update their internal data structures. Note that these + /// methods may be called on any instruction, regardless of whether or not + /// they have pointer-analysis implications. + /// + + /// deleteValue - This method should be called whenever an LLVM Value is + /// deleted from the program, for example when an instruction is found to be + /// redundant and is eliminated. + /// + virtual void deleteValue(Value *V); + + /// copyValue - This method should be used whenever a preexisting value in the + /// program is copied or cloned, introducing a new value. Note that analysis + /// implementations should tolerate clients that use this method to introduce + /// the same value multiple times: if the analysis already knows about a + /// value, it should ignore the request. + /// + virtual void copyValue(Value *From, Value *To); + + /// replaceWithNewValue - This method is the obvious combination of the two + /// above, and it provided as a helper to simplify client code. + /// + void replaceWithNewValue(Value *Old, Value *New) { + copyValue(Old, New); + deleteValue(Old); + } +}; + +/// isNoAliasCall - Return true if this pointer is returned by a noalias +/// function. +bool isNoAliasCall(const Value *V); + +/// isIdentifiedObject - Return true if this pointer refers to a distinct and +/// identifiable object. This returns true for: +/// Global Variables and Functions +/// Allocas and Mallocs +/// ByVal and NoAlias Arguments +/// NoAlias returns +/// +bool isIdentifiedObject(const Value *V); + +} // End llvm namespace + +// Because of the way .a files work, we must force the BasicAA implementation to +// be pulled in if the AliasAnalysis header is included. Otherwise we run +// the risk of AliasAnalysis being used, but the default implementation not +// being linked into the tool that uses it. +FORCE_DEFINING_FILE_TO_BE_LINKED(AliasAnalysis) +FORCE_DEFINING_FILE_TO_BE_LINKED(BasicAliasAnalysis) + +#endif diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h new file mode 100644 index 0000000..786c1d1 --- /dev/null +++ b/include/llvm/Analysis/AliasSetTracker.h @@ -0,0 +1,393 @@ +//===- llvm/Analysis/AliasSetTracker.h - Build Alias Sets -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines two classes: AliasSetTracker and AliasSet. These interface +// are used to classify a collection of pointer references into a maximal number +// of disjoint sets. Each AliasSet object constructed by the AliasSetTracker +// object refers to memory disjoint from the other sets. 
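A short sketch of the block-level query and the update hooks declared above; the helper is hypothetical, and OldVal/NewVal stand for whatever values a transformation rewrote.

// Hypothetical helper: can a load of (Ptr, Size) be moved across block BB?
bool safeToMoveLoadAcross(llvm::AliasAnalysis &AA, const llvm::BasicBlock &BB,
                          const llvm::Value *Ptr, unsigned Size) {
  return !AA.canBasicBlockModify(BB, Ptr, Size);
}

// After a transformation replaces OldVal with NewVal, keep AA consistent:
//   AA.replaceWithNewValue(OldVal, NewVal);   // copyValue(Old, New) + deleteValue(Old)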
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H +#define LLVM_ANALYSIS_ALIASSETTRACKER_H + +#include "llvm/Support/CallSite.h" +#include "llvm/Support/Streams.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/ilist.h" +#include "llvm/ADT/ilist_node.h" +#include <vector> + +namespace llvm { + +class AliasAnalysis; +class LoadInst; +class StoreInst; +class FreeInst; +class VAArgInst; +class AliasSetTracker; +class AliasSet; + +class AliasSet : public ilist_node<AliasSet> { + friend class AliasSetTracker; + + class PointerRec { + Value *Val; // The pointer this record corresponds to. + PointerRec **PrevInList, *NextInList; + AliasSet *AS; + unsigned Size; + public: + PointerRec(Value *V) + : Val(V), PrevInList(0), NextInList(0), AS(0), Size(0) {} + + Value *getValue() const { return Val; } + + PointerRec *getNext() const { return NextInList; } + bool hasAliasSet() const { return AS != 0; } + + PointerRec** setPrevInList(PointerRec **PIL) { + PrevInList = PIL; + return &NextInList; + } + + void updateSize(unsigned NewSize) { + if (NewSize > Size) Size = NewSize; + } + + unsigned getSize() const { return Size; } + + AliasSet *getAliasSet(AliasSetTracker &AST) { + assert(AS && "No AliasSet yet!"); + if (AS->Forward) { + AliasSet *OldAS = AS; + AS = OldAS->getForwardedTarget(AST); + AS->addRef(); + OldAS->dropRef(AST); + } + return AS; + } + + void setAliasSet(AliasSet *as) { + assert(AS == 0 && "Already have an alias set!"); + AS = as; + } + + void eraseFromList() { + if (NextInList) NextInList->PrevInList = PrevInList; + *PrevInList = NextInList; + if (AS->PtrListEnd == &NextInList) { + AS->PtrListEnd = PrevInList; + assert(*AS->PtrListEnd == 0 && "List not terminated right!"); + } + delete this; + } + }; + + PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes. + AliasSet *Forward; // Forwarding pointer. + AliasSet *Next, *Prev; // Doubly linked list of AliasSets. + + std::vector<CallSite> CallSites; // All calls & invokes in this alias set. + + // RefCount - Number of nodes pointing to this AliasSet plus the number of + // AliasSets forwarding to it. + unsigned RefCount : 28; + + /// AccessType - Keep track of whether this alias set merely refers to the + /// locations of memory, whether it modifies the memory, or whether it does + /// both. The lattice goes from "NoModRef" to either Refs or Mods, then to + /// ModRef as necessary. + /// + enum AccessType { + NoModRef = 0, Refs = 1, // Ref = bit 1 + Mods = 2, ModRef = 3 // Mod = bit 2 + }; + unsigned AccessTy : 2; + + /// AliasType - Keep track the relationships between the pointers in the set. + /// Lattice goes from MustAlias to MayAlias. + /// + enum AliasType { + MustAlias = 0, MayAlias = 1 + }; + unsigned AliasTy : 1; + + // Volatile - True if this alias set contains volatile loads or stores. + bool Volatile : 1; + + void addRef() { ++RefCount; } + void dropRef(AliasSetTracker &AST) { + assert(RefCount >= 1 && "Invalid reference count detected!"); + if (--RefCount == 0) + removeFromTracker(AST); + } + +public: + /// Accessors... + bool isRef() const { return AccessTy & Refs; } + bool isMod() const { return AccessTy & Mods; } + bool isMustAlias() const { return AliasTy == MustAlias; } + bool isMayAlias() const { return AliasTy == MayAlias; } + + // isVolatile - Return true if this alias set contains volatile loads or + // stores. 
+ bool isVolatile() const { return Volatile; } + + /// isForwardingAliasSet - Return true if this alias set should be ignored as + /// part of the AliasSetTracker object. + bool isForwardingAliasSet() const { return Forward; } + + /// mergeSetIn - Merge the specified alias set into this alias set... + /// + void mergeSetIn(AliasSet &AS, AliasSetTracker &AST); + + // Alias Set iteration - Allow access to all of the pointer which are part of + // this alias set... + class iterator; + iterator begin() const { return iterator(PtrList); } + iterator end() const { return iterator(); } + bool empty() const { return PtrList == 0; } + + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + void dump() const; + + /// Define an iterator for alias sets... this is just a forward iterator. + class iterator : public forward_iterator<PointerRec, ptrdiff_t> { + PointerRec *CurNode; + public: + explicit iterator(PointerRec *CN = 0) : CurNode(CN) {} + + bool operator==(const iterator& x) const { + return CurNode == x.CurNode; + } + bool operator!=(const iterator& x) const { return !operator==(x); } + + const iterator &operator=(const iterator &I) { + CurNode = I.CurNode; + return *this; + } + + value_type &operator*() const { + assert(CurNode && "Dereferencing AliasSet.end()!"); + return *CurNode; + } + value_type *operator->() const { return &operator*(); } + + Value *getPointer() const { return CurNode->getValue(); } + unsigned getSize() const { return CurNode->getSize(); } + + iterator& operator++() { // Preincrement + assert(CurNode && "Advancing past AliasSet.end()!"); + CurNode = CurNode->getNext(); + return *this; + } + iterator operator++(int) { // Postincrement + iterator tmp = *this; ++*this; return tmp; + } + }; + +private: + // Can only be created by AliasSetTracker. Also, ilist creates one + // to serve as a sentinel. + friend struct ilist_sentinel_traits<AliasSet>; + AliasSet() : PtrList(0), PtrListEnd(&PtrList), Forward(0), RefCount(0), + AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) { + } + + AliasSet(const AliasSet &AS); // do not implement + void operator=(const AliasSet &AS); // do not implement + + PointerRec *getSomePointer() const { + return PtrList; + } + + /// getForwardedTarget - Return the real alias set this represents. If this + /// has been merged with another set and is forwarding, return the ultimate + /// destination set. This also implements the union-find collapsing as well. + AliasSet *getForwardedTarget(AliasSetTracker &AST) { + if (!Forward) return this; + + AliasSet *Dest = Forward->getForwardedTarget(AST); + if (Dest != Forward) { + Dest->addRef(); + Forward->dropRef(AST); + Forward = Dest; + } + return Dest; + } + + void removeFromTracker(AliasSetTracker &AST); + + void addPointer(AliasSetTracker &AST, PointerRec &Entry, unsigned Size, + bool KnownMustAlias = false); + void addCallSite(CallSite CS, AliasAnalysis &AA); + void removeCallSite(CallSite CS) { + for (size_t i = 0, e = CallSites.size(); i != e; ++i) + if (CallSites[i].getInstruction() == CS.getInstruction()) { + CallSites[i] = CallSites.back(); + CallSites.pop_back(); + } + } + void setVolatile() { Volatile = true; } + + /// aliasesPointer - Return true if the specified pointer "may" (or must) + /// alias one of the members in the set. 
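A minimal sketch of walking one alias set with the iterator defined above, assuming AS was obtained from an AliasSetTracker; the helper name is hypothetical.

// Hypothetical helper: visit every pointer recorded in a single alias set.
void walkSet(const llvm::AliasSet &AS) {
  for (llvm::AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I) {
    llvm::Value *Ptr = I.getPointer();  // a pointer tracked in this set
    unsigned Size = I.getSize();        // largest access size recorded for it
    (void)Ptr; (void)Size;
  }
}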
+ /// + bool aliasesPointer(const Value *Ptr, unsigned Size, AliasAnalysis &AA) const; + bool aliasesCallSite(CallSite CS, AliasAnalysis &AA) const; +}; + +inline std::ostream& operator<<(std::ostream &OS, const AliasSet &AS) { + AS.print(OS); + return OS; +} + + +class AliasSetTracker { + AliasAnalysis &AA; + ilist<AliasSet> AliasSets; + + // Map from pointers to their node + DenseMap<Value*, AliasSet::PointerRec*> PointerMap; +public: + /// AliasSetTracker ctor - Create an empty collection of AliasSets, and use + /// the specified alias analysis object to disambiguate load and store + /// addresses. + explicit AliasSetTracker(AliasAnalysis &aa) : AA(aa) {} + ~AliasSetTracker() { clear(); } + + /// add methods - These methods are used to add different types of + /// instructions to the alias sets. Adding a new instruction can result in + /// one of three actions happening: + /// + /// 1. If the instruction doesn't alias any other sets, create a new set. + /// 2. If the instruction aliases exactly one set, add it to the set + /// 3. If the instruction aliases multiple sets, merge the sets, and add + /// the instruction to the result. + /// + /// These methods return true if inserting the instruction resulted in the + /// addition of a new alias set (i.e., the pointer did not alias anything). + /// + bool add(Value *Ptr, unsigned Size); // Add a location + bool add(LoadInst *LI); + bool add(StoreInst *SI); + bool add(FreeInst *FI); + bool add(VAArgInst *VAAI); + bool add(CallSite CS); // Call/Invoke instructions + bool add(CallInst *CI) { return add(CallSite(CI)); } + bool add(InvokeInst *II) { return add(CallSite(II)); } + bool add(Instruction *I); // Dispatch to one of the other add methods... + void add(BasicBlock &BB); // Add all instructions in basic block + void add(const AliasSetTracker &AST); // Add alias relations from another AST + + /// remove methods - These methods are used to remove all entries that might + /// be aliased by the specified instruction. These methods return true if any + /// alias sets were eliminated. + bool remove(Value *Ptr, unsigned Size); // Remove a location + bool remove(LoadInst *LI); + bool remove(StoreInst *SI); + bool remove(FreeInst *FI); + bool remove(VAArgInst *VAAI); + bool remove(CallSite CS); + bool remove(CallInst *CI) { return remove(CallSite(CI)); } + bool remove(InvokeInst *II) { return remove(CallSite(II)); } + bool remove(Instruction *I); + void remove(AliasSet &AS); + + void clear(); + + /// getAliasSets - Return the alias sets that are active. + /// + const ilist<AliasSet> &getAliasSets() const { return AliasSets; } + + /// getAliasSetForPointer - Return the alias set that the specified pointer + /// lives in. If the New argument is non-null, this method sets the value to + /// true if a new alias set is created to contain the pointer (because the + /// pointer didn't alias anything). + AliasSet &getAliasSetForPointer(Value *P, unsigned Size, bool *New = 0); + + /// getAliasSetForPointerIfExists - Return the alias set containing the + /// location specified if one exists, otherwise return null. + AliasSet *getAliasSetForPointerIfExists(Value *P, unsigned Size) { + return findAliasSetForPointer(P, Size); + } + + /// containsPointer - Return true if the specified location is represented by + /// this alias set, false otherwise. This does not modify the AST object or + /// alias sets. + bool containsPointer(Value *P, unsigned Size) const; + + /// getAliasAnalysis - Return the underlying alias analysis object used by + /// this tracker. 
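A minimal sketch of building a tracker over a whole function and scanning the resulting sets, assuming an initialized AliasAnalysis; the helper name is hypothetical.

#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Function.h"

// Hypothetical helper: how many distinct modified memory regions does F have?
unsigned countModifiedRegions(llvm::AliasAnalysis &AA, llvm::Function &F) {
  llvm::AliasSetTracker AST(AA);
  for (llvm::Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
    AST.add(*BB);                                   // every instruction in BB
  unsigned N = 0;
  for (llvm::AliasSetTracker::const_iterator I = AST.begin(), E = AST.end();
       I != E; ++I)
    if (I->isMod() && !I->isForwardingAliasSet())
      ++N;
  return N;
}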
+ AliasAnalysis &getAliasAnalysis() const { return AA; } + + /// deleteValue method - This method is used to remove a pointer value from + /// the AliasSetTracker entirely. It should be used when an instruction is + /// deleted from the program to update the AST. If you don't use this, you + /// would have dangling pointers to deleted instructions. + /// + void deleteValue(Value *PtrVal); + + /// copyValue - This method should be used whenever a preexisting value in the + /// program is copied or cloned, introducing a new value. Note that it is ok + /// for clients that use this method to introduce the same value multiple + /// times: if the tracker already knows about a value, it will ignore the + /// request. + /// + void copyValue(Value *From, Value *To); + + + typedef ilist<AliasSet>::iterator iterator; + typedef ilist<AliasSet>::const_iterator const_iterator; + + const_iterator begin() const { return AliasSets.begin(); } + const_iterator end() const { return AliasSets.end(); } + + iterator begin() { return AliasSets.begin(); } + iterator end() { return AliasSets.end(); } + + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + void dump() const; + +private: + friend class AliasSet; + void removeAliasSet(AliasSet *AS); + + // getEntryFor - Just like operator[] on the map, except that it creates an + // entry for the pointer if it doesn't already exist. + AliasSet::PointerRec &getEntryFor(Value *V) { + AliasSet::PointerRec *&Entry = PointerMap[V]; + if (Entry == 0) + Entry = new AliasSet::PointerRec(V); + return *Entry; + } + + AliasSet &addPointer(Value *P, unsigned Size, AliasSet::AccessType E, + bool &NewSet) { + NewSet = false; + AliasSet &AS = getAliasSetForPointer(P, Size, &NewSet); + AS.AccessTy |= E; + return AS; + } + AliasSet *findAliasSetForPointer(const Value *Ptr, unsigned Size); + + AliasSet *findAliasSetForCallSite(CallSite CS); +}; + +inline std::ostream& operator<<(std::ostream &OS, const AliasSetTracker &AST) { + AST.print(OS); + return OS; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/CFGPrinter.h b/include/llvm/Analysis/CFGPrinter.h new file mode 100644 index 0000000..6a479d1 --- /dev/null +++ b/include/llvm/Analysis/CFGPrinter.h @@ -0,0 +1,24 @@ +//===-- CFGPrinter.h - CFG printer external interface ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines external functions that can be called to explicitly +// instantiate the CFG printer. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_CFGPRINTER_H +#define LLVM_ANALYSIS_CFGPRINTER_H + +namespace llvm { + class FunctionPass; + FunctionPass *createCFGPrinterPass (); + FunctionPass *createCFGOnlyPrinterPass (); +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/CallGraph.h b/include/llvm/Analysis/CallGraph.h new file mode 100644 index 0000000..de83969 --- /dev/null +++ b/include/llvm/Analysis/CallGraph.h @@ -0,0 +1,326 @@ +//===- CallGraph.h - Build a Module's call graph ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This interface is used to build and manipulate a call graph, which is a very +// useful tool for interprocedural optimization. +// +// Every function in a module is represented as a node in the call graph. The +// callgraph node keeps track of which functions the are called by the function +// corresponding to the node. +// +// A call graph may contain nodes where the function that they correspond to is +// null. These 'external' nodes are used to represent control flow that is not +// represented (or analyzable) in the module. In particular, this analysis +// builds one external node such that: +// 1. All functions in the module without internal linkage will have edges +// from this external node, indicating that they could be called by +// functions outside of the module. +// 2. All functions whose address is used for something more than a direct +// call, for example being stored into a memory location will also have an +// edge from this external node. Since they may be called by an unknown +// caller later, they must be tracked as such. +// +// There is a second external node added for calls that leave this module. +// Functions have a call edge to the external node iff: +// 1. The function is external, reflecting the fact that they could call +// anything without internal linkage or that has its address taken. +// 2. The function contains an indirect function call. +// +// As an extension in the future, there may be multiple nodes with a null +// function. These will be used when we can prove (through pointer analysis) +// that an indirect call site can call only a specific set of functions. +// +// Because of these properties, the CallGraph captures a conservative superset +// of all of the caller-callee relationships, which is useful for +// transformations. +// +// The CallGraph class also attempts to figure out what the root of the +// CallGraph is, which it currently does by looking for a function named 'main'. +// If no function named 'main' is found, the external node is used as the entry +// node, reflecting the fact that any function without internal linkage could +// be called into (which is common for libraries). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_CALLGRAPH_H +#define LLVM_ANALYSIS_CALLGRAPH_H + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Pass.h" +#include "llvm/Support/CallSite.h" +#include "llvm/System/IncludeFile.h" +#include <map> + +namespace llvm { + +class Function; +class Module; +class CallGraphNode; + +//===----------------------------------------------------------------------===// +// CallGraph class definition +// +class CallGraph { +protected: + Module *Mod; // The module this call graph represents + + typedef std::map<const Function *, CallGraphNode *> FunctionMapTy; + FunctionMapTy FunctionMap; // Map from a function to its node + +public: + static char ID; // Class identification, replacement for typeinfo + //===--------------------------------------------------------------------- + // Accessors... + // + typedef FunctionMapTy::iterator iterator; + typedef FunctionMapTy::const_iterator const_iterator; + + /// getModule - Return the module the call graph corresponds to. 
+ /// + Module &getModule() const { return *Mod; } + + inline iterator begin() { return FunctionMap.begin(); } + inline iterator end() { return FunctionMap.end(); } + inline const_iterator begin() const { return FunctionMap.begin(); } + inline const_iterator end() const { return FunctionMap.end(); } + + // Subscripting operators, return the call graph node for the provided + // function + inline const CallGraphNode *operator[](const Function *F) const { + const_iterator I = FunctionMap.find(F); + assert(I != FunctionMap.end() && "Function not in callgraph!"); + return I->second; + } + inline CallGraphNode *operator[](const Function *F) { + const_iterator I = FunctionMap.find(F); + assert(I != FunctionMap.end() && "Function not in callgraph!"); + return I->second; + } + + /// Returns the CallGraphNode which is used to represent undetermined calls + /// into the callgraph. Override this if you want behavioral inheritance. + virtual CallGraphNode* getExternalCallingNode() const { return 0; } + + /// Return the root/main method in the module, or some other root node, such + /// as the externalcallingnode. Overload these if you behavioral + /// inheritance. + virtual CallGraphNode* getRoot() { return 0; } + virtual const CallGraphNode* getRoot() const { return 0; } + + //===--------------------------------------------------------------------- + // Functions to keep a call graph up to date with a function that has been + // modified. + // + + /// removeFunctionFromModule - Unlink the function from this module, returning + /// it. Because this removes the function from the module, the call graph + /// node is destroyed. This is only valid if the function does not call any + /// other functions (ie, there are no edges in it's CGN). The easiest way to + /// do this is to dropAllReferences before calling this. + /// + Function *removeFunctionFromModule(CallGraphNode *CGN); + Function *removeFunctionFromModule(Function *F) { + return removeFunctionFromModule((*this)[F]); + } + + /// changeFunction - This method changes the function associated with this + /// CallGraphNode, for use by transformations that need to change the + /// prototype of a Function (thus they must create a new Function and move the + /// old code over). + void changeFunction(Function *OldF, Function *NewF); + + /// getOrInsertFunction - This method is identical to calling operator[], but + /// it will insert a new CallGraphNode for the specified function if one does + /// not already exist. + CallGraphNode *getOrInsertFunction(const Function *F); + + //===--------------------------------------------------------------------- + // Pass infrastructure interface glue code... + // +protected: + CallGraph() {} + +public: + virtual ~CallGraph() { destroy(); } + + /// initialize - Call this method before calling other methods, + /// re/initializes the state of the CallGraph. 
+ /// + void initialize(Module &M); + + virtual void print(std::ostream &o, const Module *M) const; + void print(std::ostream *o, const Module *M) const { if (o) print(*o, M); } + void dump() const; + +protected: + // destroy - Release memory for the call graph + virtual void destroy(); +}; + +//===----------------------------------------------------------------------===// +// CallGraphNode class definition +// +class CallGraphNode { + Function *F; + typedef std::pair<CallSite,CallGraphNode*> CallRecord; + std::vector<CallRecord> CalledFunctions; + + CallGraphNode(const CallGraphNode &); // Do not implement +public: + typedef std::vector<CallRecord> CalledFunctionsVector; + + //===--------------------------------------------------------------------- + // Accessor methods... + // + + typedef std::vector<CallRecord>::iterator iterator; + typedef std::vector<CallRecord>::const_iterator const_iterator; + + // getFunction - Return the function that this call graph node represents... + Function *getFunction() const { return F; } + + inline iterator begin() { return CalledFunctions.begin(); } + inline iterator end() { return CalledFunctions.end(); } + inline const_iterator begin() const { return CalledFunctions.begin(); } + inline const_iterator end() const { return CalledFunctions.end(); } + inline bool empty() const { return CalledFunctions.empty(); } + inline unsigned size() const { return (unsigned)CalledFunctions.size(); } + + // Subscripting operator - Return the i'th called function... + // + CallGraphNode *operator[](unsigned i) const { + return CalledFunctions[i].second; + } + + /// dump - Print out this call graph node. + /// + void dump() const; + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + + //===--------------------------------------------------------------------- + // Methods to keep a call graph up to date with a function that has been + // modified + // + + /// removeAllCalledFunctions - As the name implies, this removes all edges + /// from this CallGraphNode to any functions it calls. + void removeAllCalledFunctions() { + CalledFunctions.clear(); + } + + /// addCalledFunction - Add a function to the list of functions called by this + /// one. + void addCalledFunction(CallSite CS, CallGraphNode *M) { + CalledFunctions.push_back(std::make_pair(CS, M)); + } + + /// removeCallEdgeFor - This method removes the edge in the node for the + /// specified call site. Note that this method takes linear time, so it + /// should be used sparingly. + void removeCallEdgeFor(CallSite CS); + + /// removeAnyCallEdgeTo - This method removes all call edges from this node + /// to the specified callee function. This takes more time to execute than + /// removeCallEdgeTo, so it should not be used unless necessary. + void removeAnyCallEdgeTo(CallGraphNode *Callee); + + /// removeOneAbstractEdgeTo - Remove one edge associated with a null callsite + /// from this node to the specified callee function. + void removeOneAbstractEdgeTo(CallGraphNode *Callee); + + /// replaceCallSite - Make the edge in the node for Old CallSite be for + /// New CallSite instead. Note that this method takes linear time, so it + /// should be used sparingly. + void replaceCallSite(CallSite Old, CallSite New); + + friend class CallGraph; + + // CallGraphNode ctor - Create a node for the specified function. 
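A minimal sketch of looking up a node and walking its outgoing edges, assuming CG is a constructed CallGraph and F a Function* that lives in the module; the helper name is hypothetical.

// Hypothetical helper: count the call edges leaving function F.
unsigned countCallEdges(llvm::CallGraph &CG, const llvm::Function *F) {
  llvm::CallGraphNode *N = CG[F];             // asserts if F is not in the graph
  unsigned Edges = 0;
  for (llvm::CallGraphNode::iterator I = N->begin(), E = N->end(); I != E; ++I) {
    llvm::CallSite CS = I->first;             // the call or invoke instruction
    llvm::CallGraphNode *Callee = I->second;  // possibly the 'external' node
    (void)CS; (void)Callee;
    ++Edges;
  }
  return Edges;
}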
+ inline CallGraphNode(Function *f) : F(f) {} +}; + +//===----------------------------------------------------------------------===// +// GraphTraits specializations for call graphs so that they can be treated as +// graphs by the generic graph algorithms. +// + +// Provide graph traits for tranversing call graphs using standard graph +// traversals. +template <> struct GraphTraits<CallGraphNode*> { + typedef CallGraphNode NodeType; + + typedef std::pair<CallSite, CallGraphNode*> CGNPairTy; + typedef std::pointer_to_unary_function<CGNPairTy, CallGraphNode*> CGNDerefFun; + + static NodeType *getEntryNode(CallGraphNode *CGN) { return CGN; } + + typedef mapped_iterator<NodeType::iterator, CGNDerefFun> ChildIteratorType; + + static inline ChildIteratorType child_begin(NodeType *N) { + return map_iterator(N->begin(), CGNDerefFun(CGNDeref)); + } + static inline ChildIteratorType child_end (NodeType *N) { + return map_iterator(N->end(), CGNDerefFun(CGNDeref)); + } + + static CallGraphNode *CGNDeref(CGNPairTy P) { + return P.second; + } + +}; + +template <> struct GraphTraits<const CallGraphNode*> { + typedef const CallGraphNode NodeType; + typedef NodeType::const_iterator ChildIteratorType; + + static NodeType *getEntryNode(const CallGraphNode *CGN) { return CGN; } + static inline ChildIteratorType child_begin(NodeType *N) { return N->begin();} + static inline ChildIteratorType child_end (NodeType *N) { return N->end(); } +}; + +template<> struct GraphTraits<CallGraph*> : public GraphTraits<CallGraphNode*> { + static NodeType *getEntryNode(CallGraph *CGN) { + return CGN->getExternalCallingNode(); // Start at the external node! + } + typedef std::pair<const Function*, CallGraphNode*> PairTy; + typedef std::pointer_to_unary_function<PairTy, CallGraphNode&> DerefFun; + + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + typedef mapped_iterator<CallGraph::iterator, DerefFun> nodes_iterator; + static nodes_iterator nodes_begin(CallGraph *CG) { + return map_iterator(CG->begin(), DerefFun(CGdereference)); + } + static nodes_iterator nodes_end (CallGraph *CG) { + return map_iterator(CG->end(), DerefFun(CGdereference)); + } + + static CallGraphNode &CGdereference(PairTy P) { + return *P.second; + } +}; + +template<> struct GraphTraits<const CallGraph*> : + public GraphTraits<const CallGraphNode*> { + static NodeType *getEntryNode(const CallGraph *CGN) { + return CGN->getExternalCallingNode(); + } + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + typedef CallGraph::const_iterator nodes_iterator; + static nodes_iterator nodes_begin(const CallGraph *CG) { return CG->begin(); } + static nodes_iterator nodes_end (const CallGraph *CG) { return CG->end(); } +}; + +} // End llvm namespace + +// Make sure that any clients of this file link in CallGraph.cpp +FORCE_DEFINING_FILE_TO_BE_LINKED(CallGraph) + +#endif diff --git a/include/llvm/Analysis/CaptureTracking.h b/include/llvm/Analysis/CaptureTracking.h new file mode 100644 index 0000000..a0ff503 --- /dev/null +++ b/include/llvm/Analysis/CaptureTracking.h @@ -0,0 +1,29 @@ +//===----- llvm/Analysis/CaptureTracking.h - Pointer capture ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains routines that help determine which pointers are captured. 
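Because of these GraphTraits specializations, the generic graph algorithms elsewhere in the tree work directly on call graphs. The sketch below is hypothetical and assumes the df_iterator utilities from llvm/ADT/DepthFirstIterator.h and a CallGraph built by the call graph analysis pass.

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/CallGraph.h"

// Hypothetical helper: count the nodes reachable from the external caller.
unsigned countReachableNodes(llvm::CallGraph &CG) {
  unsigned N = 0;
  for (llvm::df_iterator<llvm::CallGraph*> I = llvm::df_begin(&CG),
         E = llvm::df_end(&CG); I != E; ++I)
    ++N;                        // *I is the CallGraphNode* being visited
  return N;
}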
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H +#define LLVM_ANALYSIS_CAPTURETRACKING_H + +namespace llvm { + class Value; + + /// PointerMayBeCaptured - Return true if this pointer value may be captured + /// by the enclosing function (which is required to exist). This routine can + /// be expensive, so consider caching the results. The boolean ReturnCaptures + /// specifies whether returning the value (or part of it) from the function + /// counts as capturing it or not. + bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures); + +} // end namespace llvm + +#endif diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h new file mode 100644 index 0000000..bf360f7 --- /dev/null +++ b/include/llvm/Analysis/ConstantFolding.h @@ -0,0 +1,73 @@ +//===-- ConstantFolding.h - Analyze constant folding possibilities --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This family of functions determines the possibility of performing constant +// folding. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H +#define LLVM_ANALYSIS_CONSTANTFOLDING_H + +namespace llvm { + class Constant; + class ConstantExpr; + class Instruction; + class TargetData; + class Function; + class Type; + +/// ConstantFoldInstruction - Attempt to constant fold the specified +/// instruction. If successful, the constant result is returned, if not, null +/// is returned. Note that this function can only fail when attempting to fold +/// instructions like loads and stores, which have no constant expression form. +/// +Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0); + +/// ConstantFoldConstantExpression - Attempt to fold the constant expression +/// using the specified TargetData. If successful, the constant result is +/// result is returned, if not, null is returned. +Constant *ConstantFoldConstantExpression(ConstantExpr *CE, + const TargetData *TD); + +/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the +/// specified operands. If successful, the constant result is returned, if not, +/// null is returned. Note that this function can fail when attempting to +/// fold instructions like loads and stores, which have no constant expression +/// form. +/// +Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, + Constant*const * Ops, unsigned NumOps, + const TargetData *TD = 0); + +/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare +/// instruction (icmp/fcmp) with the specified operands. If it fails, it +/// returns a constant expression of the specified operands. +/// +Constant *ConstantFoldCompareInstOperands(unsigned Predicate, + Constant*const * Ops, unsigned NumOps, + const TargetData *TD = 0); + + +/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a +/// getelementptr constantexpr, return the constant value being addressed by the +/// constant expression, or null if something is funny and we can't decide. +Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE); + +/// canConstantFoldCallTo - Return true if its even possible to fold a call to +/// the specified function. 
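A minimal sketch of the most common constant-folding entry point; the helper is hypothetical, TD may be null when no TargetData is available, and replaceAllUsesWith/eraseFromParent are the usual Value/Instruction APIs rather than part of this header.

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Instructions.h"

// Hypothetical helper: fold I if possible; returns true if I was replaced.
bool tryFold(llvm::Instruction *I, const llvm::TargetData *TD) {
  if (llvm::Constant *C = llvm::ConstantFoldInstruction(I, TD)) {
    I->replaceAllUsesWith(C);   // forward all uses to the folded constant
    I->eraseFromParent();       // remove the now-dead instruction
    return true;
  }
  return false;
}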
+bool canConstantFoldCallTo(const Function *F); + +/// ConstantFoldCall - Attempt to constant fold a call to the specified function +/// with the specified arguments, returning null if unsuccessful. +Constant * +ConstantFoldCall(Function *F, Constant* const* Operands, unsigned NumOperands); +} + +#endif diff --git a/include/llvm/Analysis/ConstantsScanner.h b/include/llvm/Analysis/ConstantsScanner.h new file mode 100644 index 0000000..bac551f --- /dev/null +++ b/include/llvm/Analysis/ConstantsScanner.h @@ -0,0 +1,93 @@ +//==- llvm/Analysis/ConstantsScanner.h - Iterate over constants -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class implements an iterator to walk through the constants referenced by +// a method. This is used by the Bitcode & Assembly writers to build constant +// pools. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_CONSTANTSSCANNER_H +#define LLVM_ANALYSIS_CONSTANTSSCANNER_H + +#include "llvm/Support/InstIterator.h" +#include "llvm/ADT/iterator.h" + +namespace llvm { + +class Constant; + +class constant_iterator : public forward_iterator<const Constant, ptrdiff_t> { + const_inst_iterator InstI; // Method instruction iterator + unsigned OpIdx; // Operand index + + typedef constant_iterator _Self; + + inline bool isAtConstant() const { + assert(!InstI.atEnd() && OpIdx < InstI->getNumOperands() && + "isAtConstant called with invalid arguments!"); + return isa<Constant>(InstI->getOperand(OpIdx)); + } + +public: + inline constant_iterator(const Function *F) : InstI(inst_begin(F)), OpIdx(0) { + // Advance to first constant... if we are not already at constant or end + if (InstI != inst_end(F) && // InstI is valid? + (InstI->getNumOperands() == 0 || !isAtConstant())) // Not at constant? + operator++(); + } + + inline constant_iterator(const Function *F, bool) // end ctor + : InstI(inst_end(F)), OpIdx(0) { + } + + inline bool operator==(const _Self& x) const { return OpIdx == x.OpIdx && + InstI == x.InstI; } + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline pointer operator*() const { + assert(isAtConstant() && "Dereferenced an iterator at the end!"); + return cast<Constant>(InstI->getOperand(OpIdx)); + } + inline pointer operator->() const { return operator*(); } + + inline _Self& operator++() { // Preincrement implementation + ++OpIdx; + do { + unsigned NumOperands = InstI->getNumOperands(); + while (OpIdx < NumOperands && !isAtConstant()) { + ++OpIdx; + } + + if (OpIdx < NumOperands) return *this; // Found a constant! 
+ ++InstI; + OpIdx = 0; + } while (!InstI.atEnd()); + + return *this; // At the end of the method + } + + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } + + inline bool atEnd() const { return InstI.atEnd(); } +}; + +inline constant_iterator constant_begin(const Function *F) { + return constant_iterator(F); +} + +inline constant_iterator constant_end(const Function *F) { + return constant_iterator(F, true); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/DebugInfo.h b/include/llvm/Analysis/DebugInfo.h new file mode 100644 index 0000000..972bb07 --- /dev/null +++ b/include/llvm/Analysis/DebugInfo.h @@ -0,0 +1,555 @@ +//===--- llvm/Analysis/DebugInfo.h - Debug Information Helpers --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a bunch of datatypes that are useful for creating and +// walking debug info in LLVM IR form. They essentially provide wrappers around +// the information in the global variables that's needed when constructing the +// DWARF information. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_DEBUGINFO_H +#define LLVM_ANALYSIS_DEBUGINFO_H + +#include "llvm/Target/TargetMachine.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Support/Dwarf.h" + +namespace llvm { + class BasicBlock; + class Constant; + class Function; + class GlobalVariable; + class Module; + class Type; + class Value; + struct DbgStopPointInst; + struct DbgDeclareInst; + class Instruction; + + class DIDescriptor { + protected: + GlobalVariable *GV; + + /// DIDescriptor constructor. If the specified GV is non-null, this checks + /// to make sure that the tag in the descriptor matches 'RequiredTag'. If + /// not, the debug info is corrupt and we ignore it. + DIDescriptor(GlobalVariable *GV, unsigned RequiredTag); + + const std::string &getStringField(unsigned Elt, std::string &Result) const; + unsigned getUnsignedField(unsigned Elt) const { + return (unsigned)getUInt64Field(Elt); + } + uint64_t getUInt64Field(unsigned Elt) const; + DIDescriptor getDescriptorField(unsigned Elt) const; + + template <typename DescTy> + DescTy getFieldAs(unsigned Elt) const { + return DescTy(getDescriptorField(Elt).getGV()); + } + + GlobalVariable *getGlobalVariableField(unsigned Elt) const; + + public: + explicit DIDescriptor() : GV(0) {} + explicit DIDescriptor(GlobalVariable *gv) : GV(gv) {} + + bool isNull() const { return GV == 0; } + + GlobalVariable *getGV() const { return GV; } + + unsigned getVersion() const { + return getUnsignedField(0) & LLVMDebugVersionMask; + } + + unsigned getTag() const { + return getUnsignedField(0) & ~LLVMDebugVersionMask; + } + + /// ValidDebugInfo - Return true if V represents valid debug info value. + static bool ValidDebugInfo(Value *V, CodeGenOpt::Level OptLevel); + + /// dump - print descriptor. + void dump() const; + }; + + /// DIAnchor - A wrapper for various anchor descriptors. + class DIAnchor : public DIDescriptor { + public: + explicit DIAnchor(GlobalVariable *GV = 0) + : DIDescriptor(GV, dwarf::DW_TAG_anchor) {} + + unsigned getAnchorTag() const { return getUnsignedField(1); } + }; + + /// DISubrange - This is used to represent ranges, for array bounds. 
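Referring back to the ConstantsScanner iterator above, a minimal caller-side sketch (the helper name is invented for illustration; llvm/Constants.h is pulled in because the scanner header only forward-declares Constant):

#include "llvm/Analysis/ConstantsScanner.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"

// Hypothetical helper: count how many instruction operands in F refer to
// constants, in the order constant_iterator yields them.
static unsigned countConstantOperands(const llvm::Function *F) {
  unsigned N = 0;
  for (llvm::constant_iterator I = llvm::constant_begin(F),
                               E = llvm::constant_end(F); I != E; ++I)
    ++N;                       // *I would yield the const Constant* itself
  return N;
}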
+ class DISubrange : public DIDescriptor { + public: + explicit DISubrange(GlobalVariable *GV = 0) + : DIDescriptor(GV, dwarf::DW_TAG_subrange_type) {} + + int64_t getLo() const { return (int64_t)getUInt64Field(1); } + int64_t getHi() const { return (int64_t)getUInt64Field(2); } + }; + + /// DIArray - This descriptor holds an array of descriptors. + class DIArray : public DIDescriptor { + public: + explicit DIArray(GlobalVariable *GV = 0) : DIDescriptor(GV) {} + + unsigned getNumElements() const; + DIDescriptor getElement(unsigned Idx) const { + return getDescriptorField(Idx); + } + }; + + /// DICompileUnit - A wrapper for a compile unit. + class DICompileUnit : public DIDescriptor { + public: + explicit DICompileUnit(GlobalVariable *GV = 0) + : DIDescriptor(GV, dwarf::DW_TAG_compile_unit) {} + + unsigned getLanguage() const { return getUnsignedField(2); } + const std::string &getFilename(std::string &F) const { + return getStringField(3, F); + } + const std::string &getDirectory(std::string &F) const { + return getStringField(4, F); + } + const std::string &getProducer(std::string &F) const { + return getStringField(5, F); + } + + /// isMain - Each input file is encoded as a separate compile unit in LLVM + /// debugging information output. However, many target specific tool chains + /// prefer to encode only one compile unit in an object file. In this + /// situation, the LLVM code generator will include debugging information + /// entities in the compile unit that is marked as main compile unit. The + /// code generator accepts maximum one main compile unit per module. If a + /// module does not contain any main compile unit then the code generator + /// will emit multiple compile units in the output object file. + + bool isMain() const { return getUnsignedField(6); } + bool isOptimized() const { return getUnsignedField(7); } + const std::string &getFlags(std::string &F) const { + return getStringField(8, F); + } + unsigned getRunTimeVersion() const { return getUnsignedField(9); } + + /// Verify - Verify that a compile unit is well formed. + bool Verify() const; + + /// dump - print compile unit. + void dump() const; + }; + + /// DIEnumerator - A wrapper for an enumerator (e.g. X and Y in 'enum {X,Y}'). + /// FIXME: it seems strange that this doesn't have either a reference to the + /// type/precision or a file/line pair for location info. + class DIEnumerator : public DIDescriptor { + public: + explicit DIEnumerator(GlobalVariable *GV = 0) + : DIDescriptor(GV, dwarf::DW_TAG_enumerator) {} + + const std::string &getName(std::string &F) const { + return getStringField(1, F); + } + uint64_t getEnumValue() const { return getUInt64Field(2); } + }; + + /// DIType - This is a wrapper for a type. + /// FIXME: Types should be factored much better so that CV qualifiers and + /// others do not require a huge and empty descriptor full of zeros. + class DIType : public DIDescriptor { + public: + enum { + FlagPrivate = 1 << 0, + FlagProtected = 1 << 1, + FlagFwdDecl = 1 << 2 + }; + + protected: + DIType(GlobalVariable *GV, unsigned Tag) : DIDescriptor(GV, Tag) {} + // This ctor is used when the Tag has already been validated by a derived + // ctor. + DIType(GlobalVariable *GV, bool, bool) : DIDescriptor(GV) {} + + public: + /// isDerivedType - Return true if the specified tag is legal for + /// DIDerivedType. + static bool isDerivedType(unsigned TAG); + + /// isCompositeType - Return true if the specified tag is legal for + /// DICompositeType. 
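As a small usage sketch of the DICompileUnit accessors above (the helper is hypothetical; note that the string getters fill and return the caller-supplied buffer):

#include "llvm/Analysis/DebugInfo.h"
#include <string>

// Hypothetical helper: format "directory/filename" for a compile unit
// descriptor, or an empty string if the descriptor is null.
static std::string compileUnitPath(llvm::DICompileUnit CU) {
  if (CU.isNull())
    return std::string();
  std::string Dir, File;
  CU.getDirectory(Dir);
  CU.getFilename(File);
  return Dir + "/" + File;
}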
+ static bool isCompositeType(unsigned TAG); + + /// isBasicType - Return true if the specified tag is legal for + /// DIBasicType. + static bool isBasicType(unsigned TAG) { + return TAG == dwarf::DW_TAG_base_type; + } + + /// Verify - Verify that a type descriptor is well formed. + bool Verify() const; + public: + explicit DIType(GlobalVariable *GV); + explicit DIType() {} + virtual ~DIType() {} + + DIDescriptor getContext() const { return getDescriptorField(1); } + const std::string &getName(std::string &F) const { + return getStringField(2, F); + } + DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(3); } + unsigned getLineNumber() const { return getUnsignedField(4); } + uint64_t getSizeInBits() const { return getUInt64Field(5); } + uint64_t getAlignInBits() const { return getUInt64Field(6); } + // FIXME: Offset is only used for DW_TAG_member nodes. Making every type + // carry this is just plain insane. + uint64_t getOffsetInBits() const { return getUInt64Field(7); } + unsigned getFlags() const { return getUnsignedField(8); } + bool isPrivate() const { return (getFlags() & FlagPrivate) != 0; } + bool isProtected() const { return (getFlags() & FlagProtected) != 0; } + bool isForwardDecl() const { return (getFlags() & FlagFwdDecl) != 0; } + + /// dump - print type. + void dump() const; + }; + + /// DIBasicType - A basic type, like 'int' or 'float'. + class DIBasicType : public DIType { + public: + explicit DIBasicType(GlobalVariable *GV) + : DIType(GV, dwarf::DW_TAG_base_type) {} + + unsigned getEncoding() const { return getUnsignedField(9); } + + /// dump - print basic type. + void dump() const; + }; + + /// DIDerivedType - A simple derived type, like a const qualified type, + /// a typedef, a pointer or reference, etc. + class DIDerivedType : public DIType { + protected: + explicit DIDerivedType(GlobalVariable *GV, bool, bool) + : DIType(GV, true, true) {} + public: + explicit DIDerivedType(GlobalVariable *GV) + : DIType(GV, true, true) { + if (GV && !isDerivedType(getTag())) + GV = 0; + } + + DIType getTypeDerivedFrom() const { return getFieldAs<DIType>(9); } + + /// getOriginalTypeSize - If this type is derived from a base type then + /// return base type size. + uint64_t getOriginalTypeSize() const; + /// dump - print derived type. + void dump() const; + }; + + /// DICompositeType - This descriptor holds a type that can refer to multiple + /// other types, like a function or struct. + /// FIXME: Why is this a DIDerivedType?? + class DICompositeType : public DIDerivedType { + public: + explicit DICompositeType(GlobalVariable *GV) + : DIDerivedType(GV, true, true) { + if (GV && !isCompositeType(getTag())) + GV = 0; + } + + DIArray getTypeArray() const { return getFieldAs<DIArray>(10); } + unsigned getRunTimeLang() const { return getUnsignedField(11); } + + /// Verify - Verify that a composite type descriptor is well formed. + bool Verify() const; + + /// dump - print composite type. + void dump() const; + }; + + /// DIGlobal - This is a common class for global variables and subprograms. + class DIGlobal : public DIDescriptor { + protected: + explicit DIGlobal(GlobalVariable *GV, unsigned RequiredTag) + : DIDescriptor(GV, RequiredTag) {} + + /// isSubprogram - Return true if the specified tag is legal for + /// DISubprogram. + static bool isSubprogram(unsigned TAG) { + return TAG == dwarf::DW_TAG_subprogram; + } + + /// isGlobalVariable - Return true if the specified tag is legal for + /// DIGlobalVariable. 
+ static bool isGlobalVariable(unsigned TAG) { + return TAG == dwarf::DW_TAG_variable; + } + + public: + virtual ~DIGlobal() {} + + DIDescriptor getContext() const { return getDescriptorField(2); } + const std::string &getName(std::string &F) const { + return getStringField(3, F); + } + const std::string &getDisplayName(std::string &F) const { + return getStringField(4, F); + } + const std::string &getLinkageName(std::string &F) const { + return getStringField(5, F); + } + DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(6); } + unsigned getLineNumber() const { return getUnsignedField(7); } + DIType getType() const { return getFieldAs<DIType>(8); } + + /// isLocalToUnit - Return true if this subprogram is local to the current + /// compile unit, like 'static' in C. + unsigned isLocalToUnit() const { return getUnsignedField(9); } + unsigned isDefinition() const { return getUnsignedField(10); } + + /// dump - print global. + void dump() const; + }; + + /// DISubprogram - This is a wrapper for a subprogram (e.g. a function). + class DISubprogram : public DIGlobal { + public: + explicit DISubprogram(GlobalVariable *GV = 0) + : DIGlobal(GV, dwarf::DW_TAG_subprogram) {} + + DICompositeType getType() const { return getFieldAs<DICompositeType>(8); } + + /// Verify - Verify that a subprogram descriptor is well formed. + bool Verify() const; + + /// dump - print subprogram. + void dump() const; + + /// describes - Return true if this subprogram provides debugging + /// information for the function F. + bool describes(const Function *F); + }; + + /// DIGlobalVariable - This is a wrapper for a global variable. + class DIGlobalVariable : public DIGlobal { + public: + explicit DIGlobalVariable(GlobalVariable *GV = 0) + : DIGlobal(GV, dwarf::DW_TAG_variable) {} + + GlobalVariable *getGlobal() const { return getGlobalVariableField(11); } + + /// Verify - Verify that a global variable descriptor is well formed. + bool Verify() const; + + /// dump - print global variable. + void dump() const; + }; + + /// DIVariable - This is a wrapper for a variable (e.g. parameter, local, + /// global etc). + class DIVariable : public DIDescriptor { + public: + explicit DIVariable(GlobalVariable *gv = 0) + : DIDescriptor(gv) { + if (gv && !isVariable(getTag())) + GV = 0; + } + + DIDescriptor getContext() const { return getDescriptorField(1); } + const std::string &getName(std::string &F) const { + return getStringField(2, F); + } + DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(3); } + unsigned getLineNumber() const { return getUnsignedField(4); } + DIType getType() const { return getFieldAs<DIType>(5); } + + /// isVariable - Return true if the specified tag is legal for DIVariable. + static bool isVariable(unsigned Tag); + + /// Verify - Verify that a variable descriptor is well formed. + bool Verify() const; + + /// dump - print variable. + void dump() const; + }; + + /// DIBlock - This is a wrapper for a block (e.g. a function, scope, etc). + class DIBlock : public DIDescriptor { + public: + explicit DIBlock(GlobalVariable *GV = 0) + : DIDescriptor(GV, dwarf::DW_TAG_lexical_block) {} + + DIDescriptor getContext() const { return getDescriptorField(1); } + }; + + /// DIFactory - This object assists with the construction of the various + /// descriptors. + class DIFactory { + Module &M; + // Cached values for uniquing and faster lookups. + DIAnchor CompileUnitAnchor, SubProgramAnchor, GlobalVariableAnchor; + const Type *EmptyStructPtr; // "{}*". 
+ Function *StopPointFn; // llvm.dbg.stoppoint + Function *FuncStartFn; // llvm.dbg.func.start + Function *RegionStartFn; // llvm.dbg.region.start + Function *RegionEndFn; // llvm.dbg.region.end + Function *DeclareFn; // llvm.dbg.declare + StringMap<Constant*> StringCache; + DenseMap<Constant*, DIDescriptor> SimpleConstantCache; + + DIFactory(const DIFactory &); // DO NOT IMPLEMENT + void operator=(const DIFactory&); // DO NOT IMPLEMENT + public: + explicit DIFactory(Module &m); + + /// GetOrCreateCompileUnitAnchor - Return the anchor for compile units, + /// creating a new one if there isn't already one in the module. + DIAnchor GetOrCreateCompileUnitAnchor(); + + /// GetOrCreateSubprogramAnchor - Return the anchor for subprograms, + /// creating a new one if there isn't already one in the module. + DIAnchor GetOrCreateSubprogramAnchor(); + + /// GetOrCreateGlobalVariableAnchor - Return the anchor for globals, + /// creating a new one if there isn't already one in the module. + DIAnchor GetOrCreateGlobalVariableAnchor(); + + /// GetOrCreateArray - Create an descriptor for an array of descriptors. + /// This implicitly uniques the arrays created. + DIArray GetOrCreateArray(DIDescriptor *Tys, unsigned NumTys); + + /// GetOrCreateSubrange - Create a descriptor for a value range. This + /// implicitly uniques the values returned. + DISubrange GetOrCreateSubrange(int64_t Lo, int64_t Hi); + + /// CreateCompileUnit - Create a new descriptor for the specified compile + /// unit. + DICompileUnit CreateCompileUnit(unsigned LangID, + const std::string &Filename, + const std::string &Directory, + const std::string &Producer, + bool isMain = false, + bool isOptimized = false, + const char *Flags = "", + unsigned RunTimeVer = 0); + + /// CreateEnumerator - Create a single enumerator value. + DIEnumerator CreateEnumerator(const std::string &Name, uint64_t Val); + + /// CreateBasicType - Create a basic type like int, float, etc. + DIBasicType CreateBasicType(DIDescriptor Context, const std::string &Name, + DICompileUnit CompileUnit, unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, + unsigned Encoding); + + /// CreateDerivedType - Create a derived type like const qualified type, + /// pointer, typedef, etc. + DIDerivedType CreateDerivedType(unsigned Tag, DIDescriptor Context, + const std::string &Name, + DICompileUnit CompileUnit, + unsigned LineNumber, + uint64_t SizeInBits, uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, + DIType DerivedFrom); + + /// CreateCompositeType - Create a composite type like array, struct, etc. + DICompositeType CreateCompositeType(unsigned Tag, DIDescriptor Context, + const std::string &Name, + DICompileUnit CompileUnit, + unsigned LineNumber, + uint64_t SizeInBits, + uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, + DIType DerivedFrom, + DIArray Elements, + unsigned RunTimeLang = 0); + + /// CreateSubprogram - Create a new descriptor for the specified subprogram. + /// See comments in DISubprogram for descriptions of these fields. + DISubprogram CreateSubprogram(DIDescriptor Context, const std::string &Name, + const std::string &DisplayName, + const std::string &LinkageName, + DICompileUnit CompileUnit, unsigned LineNo, + DIType Type, bool isLocalToUnit, + bool isDefinition); + + /// CreateGlobalVariable - Create a new descriptor for the specified global. 
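A front-end-side sketch of the factory methods above (illustrative only: the function is invented, the language and encoding constants come from llvm/Support/Dwarf.h, and a real front end would pass a proper composite type for the subprogram's signature instead of an empty DIType()):

#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Module.h"
#include "llvm/Support/Dwarf.h"

// Hypothetical snippet: describe one compile unit, one basic type and one
// subprogram for module M.
static void emitSkeletonDebugInfo(llvm::Module &M) {
  llvm::DIFactory DIF(M);

  llvm::DICompileUnit CU =
    DIF.CreateCompileUnit(llvm::dwarf::DW_LANG_C99, "t.c", "/tmp",
                          "my-frontend");

  DIF.CreateBasicType(CU, "int", CU, /*LineNumber=*/0,
                      /*SizeInBits=*/32, /*AlignInBits=*/32,
                      /*OffsetInBits=*/0, /*Flags=*/0,
                      llvm::dwarf::DW_ATE_signed);

  DIF.CreateSubprogram(CU, "main", "main", "main", CU, /*LineNo=*/1,
                       llvm::DIType(), /*isLocalToUnit=*/false,
                       /*isDefinition=*/true);
}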
+ DIGlobalVariable + CreateGlobalVariable(DIDescriptor Context, const std::string &Name, + const std::string &DisplayName, + const std::string &LinkageName, + DICompileUnit CompileUnit, + unsigned LineNo, DIType Type, bool isLocalToUnit, + bool isDefinition, llvm::GlobalVariable *GV); + + /// CreateVariable - Create a new descriptor for the specified variable. + DIVariable CreateVariable(unsigned Tag, DIDescriptor Context, + const std::string &Name, + DICompileUnit CompileUnit, unsigned LineNo, + DIType Type); + + /// CreateBlock - This creates a descriptor for a lexical block with the + /// specified parent context. + DIBlock CreateBlock(DIDescriptor Context); + + /// InsertStopPoint - Create a new llvm.dbg.stoppoint intrinsic invocation, + /// inserting it at the end of the specified basic block. + void InsertStopPoint(DICompileUnit CU, unsigned LineNo, unsigned ColNo, + BasicBlock *BB); + + /// InsertSubprogramStart - Create a new llvm.dbg.func.start intrinsic to + /// mark the start of the specified subprogram. + void InsertSubprogramStart(DISubprogram SP, BasicBlock *BB); + + /// InsertRegionStart - Insert a new llvm.dbg.region.start intrinsic call to + /// mark the start of a region for the specified scoping descriptor. + void InsertRegionStart(DIDescriptor D, BasicBlock *BB); + + /// InsertRegionEnd - Insert a new llvm.dbg.region.end intrinsic call to + /// mark the end of a region for the specified scoping descriptor. + void InsertRegionEnd(DIDescriptor D, BasicBlock *BB); + + /// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call. + void InsertDeclare(llvm::Value *Storage, DIVariable D, BasicBlock *BB); + + private: + Constant *GetTagConstant(unsigned TAG); + Constant *GetStringConstant(const std::string &String); + DIAnchor GetOrCreateAnchor(unsigned TAG, const char *Name); + + /// getCastToEmpty - Return the descriptor as a Constant* with type '{}*'. + Constant *getCastToEmpty(DIDescriptor D); + }; + + /// Finds the stoppoint coressponding to this instruction, that is the + /// stoppoint that dominates this instruction + const DbgStopPointInst *findStopPoint(const Instruction *Inst); + + /// Finds the stoppoint corresponding to first real (non-debug intrinsic) + /// instruction in this Basic Block, and returns the stoppoint for it. + const DbgStopPointInst *findBBStopPoint(const BasicBlock *BB); + + /// Finds the dbg.declare intrinsic corresponding to this value if any. + /// It looks through pointer casts too. + const DbgDeclareInst *findDbgDeclare(const Value *V, bool stripCasts = true); + + /// Find the debug info descriptor corresponding to this global variable. + Value *findDbgGlobalDeclare(GlobalVariable *V); + + bool getLocationInfo(const Value *V, std::string &DisplayName, std::string &Type, + unsigned &LineNo, std::string &File, std::string &Dir); +} // end namespace llvm + +#endif diff --git a/include/llvm/Analysis/DominatorInternals.h b/include/llvm/Analysis/DominatorInternals.h new file mode 100644 index 0000000..cca0d50 --- /dev/null +++ b/include/llvm/Analysis/DominatorInternals.h @@ -0,0 +1,363 @@ +//=== llvm/Analysis/DominatorInternals.h - Dominator Calculation -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_DOMINATOR_INTERNALS_H +#define LLVM_ANALYSIS_DOMINATOR_INTERNALS_H + +#include "llvm/Analysis/Dominators.h" +#include "llvm/ADT/SmallPtrSet.h" + +//===----------------------------------------------------------------------===// +// +// DominatorTree construction - This pass constructs immediate dominator +// information for a flow-graph based on the algorithm described in this +// document: +// +// A Fast Algorithm for Finding Dominators in a Flowgraph +// T. Lengauer & R. Tarjan, ACM TOPLAS July 1979, pgs 121-141. +// +// This implements both the O(n*ack(n)) and the O(n*log(n)) versions of EVAL and +// LINK, but it turns out that the theoretically slower O(n*log(n)) +// implementation is actually faster than the "efficient" algorithm (even for +// large CFGs) because the constant overheads are substantially smaller. The +// lower-complexity version can be enabled with the following #define: +// +#define BALANCE_IDOM_TREE 0 +// +//===----------------------------------------------------------------------===// + +namespace llvm { + +template<class GraphT> +unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType* V, unsigned N) { + // This is more understandable as a recursive algorithm, but we can't use the + // recursive algorithm due to stack depth issues. Keep it here for + // documentation purposes. +#if 0 + InfoRec &VInfo = DT.Info[DT.Roots[i]]; + VInfo.DFSNum = VInfo.Semi = ++N; + VInfo.Label = V; + + Vertex.push_back(V); // Vertex[n] = V; + //Info[V].Ancestor = 0; // Ancestor[n] = 0 + //Info[V].Child = 0; // Child[v] = 0 + VInfo.Size = 1; // Size[v] = 1 + + for (succ_iterator SI = succ_begin(V), E = succ_end(V); SI != E; ++SI) { + InfoRec &SuccVInfo = DT.Info[*SI]; + if (SuccVInfo.Semi == 0) { + SuccVInfo.Parent = V; + N = DTDFSPass(DT, *SI, N); + } + } +#else + bool IsChilOfArtificialExit = (N != 0); + + std::vector<std::pair<typename GraphT::NodeType*, + typename GraphT::ChildIteratorType> > Worklist; + Worklist.push_back(std::make_pair(V, GraphT::child_begin(V))); + while (!Worklist.empty()) { + typename GraphT::NodeType* BB = Worklist.back().first; + typename GraphT::ChildIteratorType NextSucc = Worklist.back().second; + + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo = + DT.Info[BB]; + + // First time we visited this BB? + if (NextSucc == GraphT::child_begin(BB)) { + BBInfo.DFSNum = BBInfo.Semi = ++N; + BBInfo.Label = BB; + + DT.Vertex.push_back(BB); // Vertex[n] = V; + //BBInfo[V].Ancestor = 0; // Ancestor[n] = 0 + //BBInfo[V].Child = 0; // Child[v] = 0 + BBInfo.Size = 1; // Size[v] = 1 + + if (IsChilOfArtificialExit) + BBInfo.Parent = 1; + + IsChilOfArtificialExit = false; + } + + // store the DFS number of the current BB - the reference to BBInfo might + // get invalidated when processing the successors. + unsigned BBDFSNum = BBInfo.DFSNum; + + // If we are done with this block, remove it from the worklist. + if (NextSucc == GraphT::child_end(BB)) { + Worklist.pop_back(); + continue; + } + + // Increment the successor number for the next time we get to it. + ++Worklist.back().second; + + // Visit the successor next, if it isn't already visited. 
+ typename GraphT::NodeType* Succ = *NextSucc; + + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &SuccVInfo = + DT.Info[Succ]; + if (SuccVInfo.Semi == 0) { + SuccVInfo.Parent = BBDFSNum; + Worklist.push_back(std::make_pair(Succ, GraphT::child_begin(Succ))); + } + } +#endif + return N; +} + +template<class GraphT> +void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType *VIn) { + std::vector<typename GraphT::NodeType*> Work; + SmallPtrSet<typename GraphT::NodeType*, 32> Visited; + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInVAInfo = + DT.Info[DT.Vertex[DT.Info[VIn].Ancestor]]; + + if (VInVAInfo.Ancestor != 0) + Work.push_back(VIn); + + while (!Work.empty()) { + typename GraphT::NodeType* V = Work.back(); + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo = + DT.Info[V]; + typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Ancestor]; + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VAInfo = + DT.Info[VAncestor]; + + // Process Ancestor first + if (Visited.insert(VAncestor) && + VAInfo.Ancestor != 0) { + Work.push_back(VAncestor); + continue; + } + Work.pop_back(); + + // Update VInfo based on Ancestor info + if (VAInfo.Ancestor == 0) + continue; + typename GraphT::NodeType* VAncestorLabel = VAInfo.Label; + typename GraphT::NodeType* VLabel = VInfo.Label; + if (DT.Info[VAncestorLabel].Semi < DT.Info[VLabel].Semi) + VInfo.Label = VAncestorLabel; + VInfo.Ancestor = VAInfo.Ancestor; + } +} + +template<class GraphT> +typename GraphT::NodeType* Eval(DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType *V) { + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo = + DT.Info[V]; +#if !BALANCE_IDOM_TREE + // Higher-complexity but faster implementation + if (VInfo.Ancestor == 0) + return V; + Compress<GraphT>(DT, V); + return VInfo.Label; +#else + // Lower-complexity but slower implementation + if (VInfo.Ancestor == 0) + return VInfo.Label; + Compress<GraphT>(DT, V); + GraphT::NodeType* VLabel = VInfo.Label; + + GraphT::NodeType* VAncestorLabel = DT.Info[VInfo.Ancestor].Label; + if (DT.Info[VAncestorLabel].Semi >= DT.Info[VLabel].Semi) + return VLabel; + else + return VAncestorLabel; +#endif +} + +template<class GraphT> +void Link(DominatorTreeBase<typename GraphT::NodeType>& DT, + unsigned DFSNumV, typename GraphT::NodeType* W, + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo) { +#if !BALANCE_IDOM_TREE + // Higher-complexity but faster implementation + WInfo.Ancestor = DFSNumV; +#else + // Lower-complexity but slower implementation + GraphT::NodeType* WLabel = WInfo.Label; + unsigned WLabelSemi = DT.Info[WLabel].Semi; + GraphT::NodeType* S = W; + InfoRec *SInfo = &DT.Info[S]; + + GraphT::NodeType* SChild = SInfo->Child; + InfoRec *SChildInfo = &DT.Info[SChild]; + + while (WLabelSemi < DT.Info[SChildInfo->Label].Semi) { + GraphT::NodeType* SChildChild = SChildInfo->Child; + if (SInfo->Size+DT.Info[SChildChild].Size >= 2*SChildInfo->Size) { + SChildInfo->Ancestor = S; + SInfo->Child = SChild = SChildChild; + SChildInfo = &DT.Info[SChild]; + } else { + SChildInfo->Size = SInfo->Size; + S = SInfo->Ancestor = SChild; + SInfo = SChildInfo; + SChild = SChildChild; + SChildInfo = &DT.Info[SChild]; + } + } + + DominatorTreeBase::InfoRec &VInfo = DT.Info[V]; + SInfo->Label = WLabel; + + assert(V != W && "The optimization here will not work in this case!"); + unsigned WSize = WInfo.Size; + unsigned VSize = (VInfo.Size += WSize); + 
+ if (VSize < 2*WSize) + std::swap(S, VInfo.Child); + + while (S) { + SInfo = &DT.Info[S]; + SInfo->Ancestor = V; + S = SInfo->Child; + } +#endif +} + +template<class FuncT, class NodeT> +void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT, + FuncT& F) { + typedef GraphTraits<NodeT> GraphT; + + unsigned N = 0; + bool MultipleRoots = (DT.Roots.size() > 1); + if (MultipleRoots) { + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo = + DT.Info[NULL]; + BBInfo.DFSNum = BBInfo.Semi = ++N; + BBInfo.Label = NULL; + + DT.Vertex.push_back(NULL); // Vertex[n] = V; + //BBInfo[V].Ancestor = 0; // Ancestor[n] = 0 + //BBInfo[V].Child = 0; // Child[v] = 0 + BBInfo.Size = 1; // Size[v] = 1 + } + + // Step #1: Number blocks in depth-first order and initialize variables used + // in later stages of the algorithm. + for (unsigned i = 0, e = static_cast<unsigned>(DT.Roots.size()); + i != e; ++i) + N = DFSPass<GraphT>(DT, DT.Roots[i], N); + + // it might be that some blocks did not get a DFS number (e.g., blocks of + // infinite loops). In these cases an artificial exit node is required. + MultipleRoots |= (DT.isPostDominator() && N != F.size()); + + for (unsigned i = N; i >= 2; --i) { + typename GraphT::NodeType* W = DT.Vertex[i]; + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo = + DT.Info[W]; + + // Step #2: Calculate the semidominators of all vertices + bool HasChildOutsideDFS = false; + + // initialize the semi dominator to point to the parent node + WInfo.Semi = WInfo.Parent; + for (typename GraphTraits<Inverse<NodeT> >::ChildIteratorType CI = + GraphTraits<Inverse<NodeT> >::child_begin(W), + E = GraphTraits<Inverse<NodeT> >::child_end(W); CI != E; ++CI) { + if (DT.Info.count(*CI)) { // Only if this predecessor is reachable! + unsigned SemiU = DT.Info[Eval<GraphT>(DT, *CI)].Semi; + if (SemiU < WInfo.Semi) + WInfo.Semi = SemiU; + } + else { + // if the child has no DFS number it is not post-dominated by any exit, + // and so is the current block. + HasChildOutsideDFS = true; + } + } + + // if some child has no DFS number it is not post-dominated by any exit, + // and so is the current block. + if (DT.isPostDominator() && HasChildOutsideDFS) + WInfo.Semi = 0; + + DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W); + + typename GraphT::NodeType* WParent = DT.Vertex[WInfo.Parent]; + Link<GraphT>(DT, WInfo.Parent, W, WInfo); + + // Step #3: Implicitly define the immediate dominator of vertices + std::vector<typename GraphT::NodeType*> &WParentBucket = + DT.Info[WParent].Bucket; + while (!WParentBucket.empty()) { + typename GraphT::NodeType* V = WParentBucket.back(); + WParentBucket.pop_back(); + typename GraphT::NodeType* U = Eval<GraphT>(DT, V); + DT.IDoms[V] = DT.Info[U].Semi < DT.Info[V].Semi ? U : WParent; + } + } + + // Step #4: Explicitly define the immediate dominator of each vertex + for (unsigned i = 2; i <= N; ++i) { + typename GraphT::NodeType* W = DT.Vertex[i]; + typename GraphT::NodeType*& WIDom = DT.IDoms[W]; + if (WIDom != DT.Vertex[DT.Info[W].Semi]) + WIDom = DT.IDoms[WIDom]; + } + + if (DT.Roots.empty()) return; + + // Add a node for the root. This node might be the actual root, if there is + // one exit block, or it may be the virtual exit (denoted by (BasicBlock *)0) + // which postdominates all real exits if there are multiple exit blocks, or + // an infinite loop. + typename GraphT::NodeType* Root = !MultipleRoots ? 
DT.Roots[0] : 0; + + DT.DomTreeNodes[Root] = DT.RootNode = + new DomTreeNodeBase<typename GraphT::NodeType>(Root, 0); + + // Loop over all of the reachable blocks in the function... + for (unsigned i = 2; i <= N; ++i) { + typename GraphT::NodeType* W = DT.Vertex[i]; + + DomTreeNodeBase<typename GraphT::NodeType> *BBNode = DT.DomTreeNodes[W]; + if (BBNode) continue; // Haven't calculated this node yet? + + typename GraphT::NodeType* ImmDom = DT.getIDom(W); + + assert(ImmDom || DT.DomTreeNodes[NULL]); + + // Get or calculate the node for the immediate dominator + DomTreeNodeBase<typename GraphT::NodeType> *IDomNode = + DT.getNodeForBlock(ImmDom); + + // Add a new tree node for this BasicBlock, and link it as a child of + // IDomNode + DomTreeNodeBase<typename GraphT::NodeType> *C = + new DomTreeNodeBase<typename GraphT::NodeType>(W, IDomNode); + DT.DomTreeNodes[W] = IDomNode->addChild(C); + } + + // Free temporary memory used to construct idom's + DT.IDoms.clear(); + DT.Info.clear(); + std::vector<typename GraphT::NodeType*>().swap(DT.Vertex); + + // FIXME: This does not work on PostDomTrees. It seems likely that this is + // due to an error in the algorithm for post-dominators. This really should + // be investigated and fixed at some point. + // DT.updateDFSNumbers(); + + // Start out with the DFS numbers being invalid. Let them be computed if + // demanded. + DT.DFSInfoValid = false; +} + +} + +#endif diff --git a/include/llvm/Analysis/Dominators.h b/include/llvm/Analysis/Dominators.h new file mode 100644 index 0000000..b405f5b --- /dev/null +++ b/include/llvm/Analysis/Dominators.h @@ -0,0 +1,1055 @@ +//===- llvm/Analysis/Dominators.h - Dominator Info Calculation --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the following classes: +// 1. DominatorTree: Represent dominators as an explicit tree structure. +// 2. DominanceFrontier: Calculate and hold the dominance frontier for a +// function. +// +// These data structures are listed in increasing order of complexity. It +// takes longer to calculate the dominator frontier, for example, than the +// DominatorTree mapping. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_DOMINATORS_H +#define LLVM_ANALYSIS_DOMINATORS_H + +#include "llvm/Pass.h" +#include "llvm/BasicBlock.h" +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Assembly/Writer.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/Compiler.h" +#include <algorithm> +#include <map> +#include <set> + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// DominatorBase - Base class that other, more interesting dominator analyses +/// inherit from. +/// +template <class NodeT> +class DominatorBase { +protected: + std::vector<NodeT*> Roots; + const bool IsPostDominators; + inline explicit DominatorBase(bool isPostDom) : + Roots(), IsPostDominators(isPostDom) {} +public: + + /// getRoots - Return the root blocks of the current CFG. This may include + /// multiple blocks if we are computing post dominators. 
For forward + /// dominators, this will always be a single block (the entry node). + /// + inline const std::vector<NodeT*> &getRoots() const { return Roots; } + + /// isPostDominator - Returns true if analysis based of postdoms + /// + bool isPostDominator() const { return IsPostDominators; } +}; + + +//===----------------------------------------------------------------------===// +// DomTreeNode - Dominator Tree Node +template<class NodeT> class DominatorTreeBase; +struct PostDominatorTree; +class MachineBasicBlock; + +template <class NodeT> +class DomTreeNodeBase { + NodeT *TheBB; + DomTreeNodeBase<NodeT> *IDom; + std::vector<DomTreeNodeBase<NodeT> *> Children; + int DFSNumIn, DFSNumOut; + + template<class N> friend class DominatorTreeBase; + friend struct PostDominatorTree; +public: + typedef typename std::vector<DomTreeNodeBase<NodeT> *>::iterator iterator; + typedef typename std::vector<DomTreeNodeBase<NodeT> *>::const_iterator + const_iterator; + + iterator begin() { return Children.begin(); } + iterator end() { return Children.end(); } + const_iterator begin() const { return Children.begin(); } + const_iterator end() const { return Children.end(); } + + NodeT *getBlock() const { return TheBB; } + DomTreeNodeBase<NodeT> *getIDom() const { return IDom; } + const std::vector<DomTreeNodeBase<NodeT>*> &getChildren() const { + return Children; + } + + DomTreeNodeBase(NodeT *BB, DomTreeNodeBase<NodeT> *iDom) + : TheBB(BB), IDom(iDom), DFSNumIn(-1), DFSNumOut(-1) { } + + DomTreeNodeBase<NodeT> *addChild(DomTreeNodeBase<NodeT> *C) { + Children.push_back(C); + return C; + } + + size_t getNumChildren() const { + return Children.size(); + } + + void clearAllChildren() { + Children.clear(); + } + + bool compare(DomTreeNodeBase<NodeT> *Other) { + if (getNumChildren() != Other->getNumChildren()) + return true; + + SmallPtrSet<NodeT *, 4> OtherChildren; + for(iterator I = Other->begin(), E = Other->end(); I != E; ++I) { + NodeT *Nd = (*I)->getBlock(); + OtherChildren.insert(Nd); + } + + for(iterator I = begin(), E = end(); I != E; ++I) { + NodeT *N = (*I)->getBlock(); + if (OtherChildren.count(N) == 0) + return true; + } + return false; + } + + void setIDom(DomTreeNodeBase<NodeT> *NewIDom) { + assert(IDom && "No immediate dominator?"); + if (IDom != NewIDom) { + typename std::vector<DomTreeNodeBase<NodeT>*>::iterator I = + std::find(IDom->Children.begin(), IDom->Children.end(), this); + assert(I != IDom->Children.end() && + "Not in immediate dominator children set!"); + // I am no longer your child... + IDom->Children.erase(I); + + // Switch to new dominator + IDom = NewIDom; + IDom->Children.push_back(this); + } + } + + /// getDFSNumIn/getDFSNumOut - These are an internal implementation detail, do + /// not call them. + unsigned getDFSNumIn() const { return DFSNumIn; } + unsigned getDFSNumOut() const { return DFSNumOut; } +private: + // Return true if this node is dominated by other. Use this only if DFS info + // is valid. 
+ bool DominatedBy(const DomTreeNodeBase<NodeT> *other) const { + return this->DFSNumIn >= other->DFSNumIn && + this->DFSNumOut <= other->DFSNumOut; + } +}; + +EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<BasicBlock>); +EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>); + +template<class NodeT> +static std::ostream &operator<<(std::ostream &o, + const DomTreeNodeBase<NodeT> *Node) { + if (Node->getBlock()) + WriteAsOperand(o, Node->getBlock(), false); + else + o << " <<exit node>>"; + + o << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "}"; + + return o << "\n"; +} + +template<class NodeT> +static void PrintDomTree(const DomTreeNodeBase<NodeT> *N, std::ostream &o, + unsigned Lev) { + o << std::string(2*Lev, ' ') << "[" << Lev << "] " << N; + for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(), + E = N->end(); I != E; ++I) + PrintDomTree<NodeT>(*I, o, Lev+1); +} + +typedef DomTreeNodeBase<BasicBlock> DomTreeNode; + +//===----------------------------------------------------------------------===// +/// DominatorTree - Calculate the immediate dominator tree for a function. +/// + +template<class FuncT, class N> +void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType>& DT, + FuncT& F); + +template<class NodeT> +class DominatorTreeBase : public DominatorBase<NodeT> { +protected: + typedef DenseMap<NodeT*, DomTreeNodeBase<NodeT>*> DomTreeNodeMapType; + DomTreeNodeMapType DomTreeNodes; + DomTreeNodeBase<NodeT> *RootNode; + + bool DFSInfoValid; + unsigned int SlowQueries; + // Information record used during immediate dominators computation. + struct InfoRec { + unsigned DFSNum; + unsigned Semi; + unsigned Size; + NodeT *Label, *Child; + unsigned Parent, Ancestor; + + std::vector<NodeT*> Bucket; + + InfoRec() : DFSNum(0), Semi(0), Size(0), Label(0), Child(0), Parent(0), + Ancestor(0) {} + }; + + DenseMap<NodeT*, NodeT*> IDoms; + + // Vertex - Map the DFS number to the BasicBlock* + std::vector<NodeT*> Vertex; + + // Info - Collection of information used during the computation of idoms. + DenseMap<NodeT*, InfoRec> Info; + + void reset() { + for (typename DomTreeNodeMapType::iterator I = this->DomTreeNodes.begin(), + E = DomTreeNodes.end(); I != E; ++I) + delete I->second; + DomTreeNodes.clear(); + IDoms.clear(); + this->Roots.clear(); + Vertex.clear(); + RootNode = 0; + } + + // NewBB is split and now it has one successor. Update dominator tree to + // reflect this change. 
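A small traversal sketch over the node type above (the helper name is invented); the same child iterators drive PrintDomTree:

#include "llvm/Analysis/Dominators.h"

// Hypothetical helper: count the nodes in the dominator subtree rooted at N.
static unsigned countDomSubtree(const llvm::DomTreeNode *N) {
  unsigned Count = 1;                              // count N itself
  for (llvm::DomTreeNode::const_iterator I = N->begin(), E = N->end();
       I != E; ++I)
    Count += countDomSubtree(*I);                  // recurse into each child
  return Count;
}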
+ template<class N, class GraphT> + void Split(DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType* NewBB) { + assert(std::distance(GraphT::child_begin(NewBB), GraphT::child_end(NewBB)) == 1 + && "NewBB should have a single successor!"); + typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB); + + std::vector<typename GraphT::NodeType*> PredBlocks; + for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI = + GraphTraits<Inverse<N> >::child_begin(NewBB), + PE = GraphTraits<Inverse<N> >::child_end(NewBB); PI != PE; ++PI) + PredBlocks.push_back(*PI); + + assert(!PredBlocks.empty() && "No predblocks??"); + + bool NewBBDominatesNewBBSucc = true; + for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI = + GraphTraits<Inverse<N> >::child_begin(NewBBSucc), + E = GraphTraits<Inverse<N> >::child_end(NewBBSucc); PI != E; ++PI) + if (*PI != NewBB && !DT.dominates(NewBBSucc, *PI) && + DT.isReachableFromEntry(*PI)) { + NewBBDominatesNewBBSucc = false; + break; + } + + // Find NewBB's immediate dominator and create new dominator tree node for + // NewBB. + NodeT *NewBBIDom = 0; + unsigned i = 0; + for (i = 0; i < PredBlocks.size(); ++i) + if (DT.isReachableFromEntry(PredBlocks[i])) { + NewBBIDom = PredBlocks[i]; + break; + } + assert(i != PredBlocks.size() && "No reachable preds?"); + for (i = i + 1; i < PredBlocks.size(); ++i) { + if (DT.isReachableFromEntry(PredBlocks[i])) + NewBBIDom = DT.findNearestCommonDominator(NewBBIDom, PredBlocks[i]); + } + assert(NewBBIDom && "No immediate dominator found??"); + + // Create the new dominator tree node... and set the idom of NewBB. + DomTreeNodeBase<NodeT> *NewBBNode = DT.addNewBlock(NewBB, NewBBIDom); + + // If NewBB strictly dominates other blocks, then it is now the immediate + // dominator of NewBBSucc. Update the dominator tree as appropriate. + if (NewBBDominatesNewBBSucc) { + DomTreeNodeBase<NodeT> *NewBBSuccNode = DT.getNode(NewBBSucc); + DT.changeImmediateDominator(NewBBSuccNode, NewBBNode); + } + } + +public: + explicit DominatorTreeBase(bool isPostDom) + : DominatorBase<NodeT>(isPostDom), DFSInfoValid(false), SlowQueries(0) {} + virtual ~DominatorTreeBase() { reset(); } + + // FIXME: Should remove this + virtual bool runOnFunction(Function &F) { return false; } + + /// compare - Return false if the other dominator tree base matches this + /// dominator tree base. Otherwise return true. + bool compare(DominatorTreeBase &Other) const { + + const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes; + if (DomTreeNodes.size() != OtherDomTreeNodes.size()) + return true; + + SmallPtrSet<const NodeT *,4> MyBBs; + for (typename DomTreeNodeMapType::const_iterator + I = this->DomTreeNodes.begin(), + E = this->DomTreeNodes.end(); I != E; ++I) { + NodeT *BB = I->first; + typename DomTreeNodeMapType::const_iterator OI = OtherDomTreeNodes.find(BB); + if (OI == OtherDomTreeNodes.end()) + return true; + + DomTreeNodeBase<NodeT>* MyNd = I->second; + DomTreeNodeBase<NodeT>* OtherNd = OI->second; + + if (MyNd->compare(OtherNd)) + return true; + } + + return false; + } + + virtual void releaseMemory() { reset(); } + + /// getNode - return the (Post)DominatorTree node for the specified basic + /// block. This is the same as using operator[] on this class. + /// + inline DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const { + typename DomTreeNodeMapType::const_iterator I = DomTreeNodes.find(BB); + return I != DomTreeNodes.end() ? 
I->second : 0; + } + + /// getRootNode - This returns the entry node for the CFG of the function. If + /// this tree represents the post-dominance relations for a function, however, + /// this root may be a node with the block == NULL. This is the case when + /// there are multiple exit nodes from a particular function. Consumers of + /// post-dominance information must be capable of dealing with this + /// possibility. + /// + DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; } + const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; } + + /// properlyDominates - Returns true iff this dominates N and this != N. + /// Note that this is not a constant time operation! + /// + bool properlyDominates(const DomTreeNodeBase<NodeT> *A, + DomTreeNodeBase<NodeT> *B) const { + if (A == 0 || B == 0) return false; + return dominatedBySlowTreeWalk(A, B); + } + + inline bool properlyDominates(NodeT *A, NodeT *B) { + return properlyDominates(getNode(A), getNode(B)); + } + + bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A, + const DomTreeNodeBase<NodeT> *B) const { + const DomTreeNodeBase<NodeT> *IDom; + if (A == 0 || B == 0) return false; + while ((IDom = B->getIDom()) != 0 && IDom != A && IDom != B) + B = IDom; // Walk up the tree + return IDom != 0; + } + + + /// isReachableFromEntry - Return true if A is dominated by the entry + /// block of the function containing it. + bool isReachableFromEntry(NodeT* A) { + assert (!this->isPostDominator() + && "This is not implemented for post dominators"); + return dominates(&A->getParent()->front(), A); + } + + /// dominates - Returns true iff A dominates B. Note that this is not a + /// constant time operation! + /// + inline bool dominates(const DomTreeNodeBase<NodeT> *A, + DomTreeNodeBase<NodeT> *B) { + if (B == A) + return true; // A node trivially dominates itself. + + if (A == 0 || B == 0) + return false; + + if (DFSInfoValid) + return B->DominatedBy(A); + + // If we end up with too many slow queries, just update the + // DFS numbers on the theory that we are going to keep querying. + SlowQueries++; + if (SlowQueries > 32) { + updateDFSNumbers(); + return B->DominatedBy(A); + } + + return dominatedBySlowTreeWalk(A, B); + } + + inline bool dominates(NodeT *A, NodeT *B) { + if (A == B) + return true; + + return dominates(getNode(A), getNode(B)); + } + + NodeT *getRoot() const { + assert(this->Roots.size() == 1 && "Should always have entry node!"); + return this->Roots[0]; + } + + /// findNearestCommonDominator - Find nearest common dominator basic block + /// for basic block A and B. If there is no such block then return NULL. + NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) { + + assert (!this->isPostDominator() + && "This is not implemented for post dominators"); + assert (A->getParent() == B->getParent() + && "Two blocks are not in same function"); + + // If either A or B is a entry block then it is nearest common dominator. + NodeT &Entry = A->getParent()->front(); + if (A == &Entry || B == &Entry) + return &Entry; + + // If B dominates A then B is nearest common dominator. + if (dominates(B, A)) + return B; + + // If A dominates B then A is nearest common dominator. + if (dominates(A, B)) + return A; + + DomTreeNodeBase<NodeT> *NodeA = getNode(A); + DomTreeNodeBase<NodeT> *NodeB = getNode(B); + + // Collect NodeA dominators set. 
+ SmallPtrSet<DomTreeNodeBase<NodeT>*, 16> NodeADoms; + NodeADoms.insert(NodeA); + DomTreeNodeBase<NodeT> *IDomA = NodeA->getIDom(); + while (IDomA) { + NodeADoms.insert(IDomA); + IDomA = IDomA->getIDom(); + } + + // Walk NodeB immediate dominators chain and find common dominator node. + DomTreeNodeBase<NodeT> *IDomB = NodeB->getIDom(); + while(IDomB) { + if (NodeADoms.count(IDomB) != 0) + return IDomB->getBlock(); + + IDomB = IDomB->getIDom(); + } + + return NULL; + } + + //===--------------------------------------------------------------------===// + // API to update (Post)DominatorTree information based on modifications to + // the CFG... + + /// addNewBlock - Add a new node to the dominator tree information. This + /// creates a new node as a child of DomBB dominator node,linking it into + /// the children list of the immediate dominator. + DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) { + assert(getNode(BB) == 0 && "Block already in dominator tree!"); + DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB); + assert(IDomNode && "Not immediate dominator specified for block!"); + DFSInfoValid = false; + return DomTreeNodes[BB] = + IDomNode->addChild(new DomTreeNodeBase<NodeT>(BB, IDomNode)); + } + + /// changeImmediateDominator - This method is used to update the dominator + /// tree information when a node's immediate dominator changes. + /// + void changeImmediateDominator(DomTreeNodeBase<NodeT> *N, + DomTreeNodeBase<NodeT> *NewIDom) { + assert(N && NewIDom && "Cannot change null node pointers!"); + DFSInfoValid = false; + N->setIDom(NewIDom); + } + + void changeImmediateDominator(NodeT *BB, NodeT *NewBB) { + changeImmediateDominator(getNode(BB), getNode(NewBB)); + } + + /// eraseNode - Removes a node from the dominator tree. Block must not + /// domiante any other blocks. Removes node from its immediate dominator's + /// children list. Deletes dominator node associated with basic block BB. + void eraseNode(NodeT *BB) { + DomTreeNodeBase<NodeT> *Node = getNode(BB); + assert (Node && "Removing node that isn't in dominator tree."); + assert (Node->getChildren().empty() && "Node is not a leaf node."); + + // Remove node from immediate dominator's children list. + DomTreeNodeBase<NodeT> *IDom = Node->getIDom(); + if (IDom) { + typename std::vector<DomTreeNodeBase<NodeT>*>::iterator I = + std::find(IDom->Children.begin(), IDom->Children.end(), Node); + assert(I != IDom->Children.end() && + "Not in immediate dominator children set!"); + // I am no longer your child... + IDom->Children.erase(I); + } + + DomTreeNodes.erase(BB); + delete Node; + } + + /// removeNode - Removes a node from the dominator tree. Block must not + /// dominate any other blocks. Invalidates any node pointing to removed + /// block. + void removeNode(NodeT *BB) { + assert(getNode(BB) && "Removing node that isn't in dominator tree."); + DomTreeNodes.erase(BB); + } + + /// splitBlock - BB is split and now it has one successor. Update dominator + /// tree to reflect this change. 
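A caller-side sketch of the query above (hypothetical helper; it guards with isReachableFromEntry because the common-dominator walk assumes both blocks have nodes in the tree):

#include "llvm/Analysis/Dominators.h"
#include "llvm/BasicBlock.h"

// Hypothetical helper: pick a block that dominates both A and B, e.g. as an
// insertion point for a computation hoisted out of the two blocks.
static llvm::BasicBlock *
pickHoistBlock(llvm::DominatorTreeBase<llvm::BasicBlock> &DT,
               llvm::BasicBlock *A, llvm::BasicBlock *B) {
  if (!DT.isReachableFromEntry(A) || !DT.isReachableFromEntry(B))
    return 0;                                    // stay conservative
  return DT.findNearestCommonDominator(A, B);
}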
+ void splitBlock(NodeT* NewBB) { + if (this->IsPostDominators) + this->Split<Inverse<NodeT*>, GraphTraits<Inverse<NodeT*> > >(*this, NewBB); + else + this->Split<NodeT*, GraphTraits<NodeT*> >(*this, NewBB); + } + + /// print - Convert to human readable form + /// + virtual void print(std::ostream &o, const Module* ) const { + o << "=============================--------------------------------\n"; + if (this->isPostDominator()) + o << "Inorder PostDominator Tree: "; + else + o << "Inorder Dominator Tree: "; + if (this->DFSInfoValid) + o << "DFSNumbers invalid: " << SlowQueries << " slow queries."; + o << "\n"; + + PrintDomTree<NodeT>(getRootNode(), o, 1); + } + + void print(std::ostream *OS, const Module* M = 0) const { + if (OS) print(*OS, M); + } + + virtual void dump() { + print(llvm::cerr); + } + +protected: + template<class GraphT> + friend void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType* VIn); + + template<class GraphT> + friend typename GraphT::NodeType* Eval( + DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType* V); + + template<class GraphT> + friend void Link(DominatorTreeBase<typename GraphT::NodeType>& DT, + unsigned DFSNumV, typename GraphT::NodeType* W, + typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo); + + template<class GraphT> + friend unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT, + typename GraphT::NodeType* V, + unsigned N); + + template<class FuncT, class N> + friend void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType>& DT, + FuncT& F); + + /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking + /// dominator tree in dfs order. + void updateDFSNumbers() { + unsigned DFSNum = 0; + + SmallVector<std::pair<DomTreeNodeBase<NodeT>*, + typename DomTreeNodeBase<NodeT>::iterator>, 32> WorkStack; + + for (unsigned i = 0, e = (unsigned)this->Roots.size(); i != e; ++i) { + DomTreeNodeBase<NodeT> *ThisRoot = getNode(this->Roots[i]); + WorkStack.push_back(std::make_pair(ThisRoot, ThisRoot->begin())); + ThisRoot->DFSNumIn = DFSNum++; + + while (!WorkStack.empty()) { + DomTreeNodeBase<NodeT> *Node = WorkStack.back().first; + typename DomTreeNodeBase<NodeT>::iterator ChildIt = + WorkStack.back().second; + + // If we visited all of the children of this node, "recurse" back up the + // stack setting the DFOutNum. + if (ChildIt == Node->end()) { + Node->DFSNumOut = DFSNum++; + WorkStack.pop_back(); + } else { + // Otherwise, recursively visit this child. + DomTreeNodeBase<NodeT> *Child = *ChildIt; + ++WorkStack.back().second; + + WorkStack.push_back(std::make_pair(Child, Child->begin())); + Child->DFSNumIn = DFSNum++; + } + } + } + + SlowQueries = 0; + DFSInfoValid = true; + } + + DomTreeNodeBase<NodeT> *getNodeForBlock(NodeT *BB) { + if (DomTreeNodeBase<NodeT> *BBNode = this->DomTreeNodes[BB]) + return BBNode; + + // Haven't calculated this node yet? Get or calculate the node for the + // immediate dominator. + NodeT *IDom = getIDom(BB); + + assert(IDom || this->DomTreeNodes[NULL]); + DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom); + + // Add a new tree node for this BasicBlock, and link it as a child of + // IDomNode + DomTreeNodeBase<NodeT> *C = new DomTreeNodeBase<NodeT>(BB, IDomNode); + return this->DomTreeNodes[BB] = IDomNode->addChild(C); + } + + inline NodeT *getIDom(NodeT *BB) const { + typename DenseMap<NodeT*, NodeT*>::const_iterator I = IDoms.find(BB); + return I != IDoms.end() ? 
I->second : 0; + } + + inline void addRoot(NodeT* BB) { + this->Roots.push_back(BB); + } + +public: + /// recalculate - compute a dominator tree for the given function + template<class FT> + void recalculate(FT& F) { + if (!this->IsPostDominators) { + reset(); + + // Initialize roots + this->Roots.push_back(&F.front()); + this->IDoms[&F.front()] = 0; + this->DomTreeNodes[&F.front()] = 0; + this->Vertex.push_back(0); + + Calculate<FT, NodeT*>(*this, F); + + updateDFSNumbers(); + } else { + reset(); // Reset from the last time we were run... + + // Initialize the roots list + for (typename FT::iterator I = F.begin(), E = F.end(); I != E; ++I) { + if (std::distance(GraphTraits<FT*>::child_begin(I), + GraphTraits<FT*>::child_end(I)) == 0) + addRoot(I); + + // Prepopulate maps so that we don't get iterator invalidation issues later. + this->IDoms[I] = 0; + this->DomTreeNodes[I] = 0; + } + + this->Vertex.push_back(0); + + Calculate<FT, Inverse<NodeT*> >(*this, F); + } + } +}; + +EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<BasicBlock>); + +//===------------------------------------- +/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to +/// compute a normal dominator tree. +/// +class DominatorTree : public FunctionPass { +public: + static char ID; // Pass ID, replacement for typeid + DominatorTreeBase<BasicBlock>* DT; + + DominatorTree() : FunctionPass(&ID) { + DT = new DominatorTreeBase<BasicBlock>(false); + } + + ~DominatorTree() { + DT->releaseMemory(); + delete DT; + } + + DominatorTreeBase<BasicBlock>& getBase() { return *DT; } + + /// getRoots - Return the root blocks of the current CFG. This may include + /// multiple blocks if we are computing post dominators. For forward + /// dominators, this will always be a single block (the entry node). + /// + inline const std::vector<BasicBlock*> &getRoots() const { + return DT->getRoots(); + } + + inline BasicBlock *getRoot() const { + return DT->getRoot(); + } + + inline DomTreeNode *getRootNode() const { + return DT->getRootNode(); + } + + /// compare - Return false if the other dominator tree matches this + /// dominator tree. Otherwise return true. + inline bool compare(DominatorTree &Other) const { + DomTreeNode *R = getRootNode(); + DomTreeNode *OtherR = Other.getRootNode(); + + if (!R || !OtherR || R->getBlock() != OtherR->getBlock()) + return true; + + if (DT->compare(Other.getBase())) + return true; + + return false; + } + + virtual bool runOnFunction(Function &F); + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + + inline bool dominates(DomTreeNode* A, DomTreeNode* B) const { + return DT->dominates(A, B); + } + + inline bool dominates(BasicBlock* A, BasicBlock* B) const { + return DT->dominates(A, B); + } + + // dominates - Return true if A dominates B. This performs the + // special checks necessary if A and B are in the same basic block. + bool dominates(Instruction *A, Instruction *B) const { + BasicBlock *BBA = A->getParent(), *BBB = B->getParent(); + if (BBA != BBB) return DT->dominates(BBA, BBB); + + // It is not possible to determine dominance between two PHI nodes + // based on their ordering. + if (isa<PHINode>(A) && isa<PHINode>(B)) + return false; + + // Loop through the basic block until we find A or B. + BasicBlock::iterator I = BBA->begin(); + for (; &*I != A && &*I != B; ++I) /*empty*/; + + //if(!DT.IsPostDominators) { + // A dominates B if it is found first in the basic block. 
+ return &*I == A; + //} else { + // // A post-dominates B if B is found first in the basic block. + // return &*I == B; + //} + } + + inline bool properlyDominates(const DomTreeNode* A, DomTreeNode* B) const { + return DT->properlyDominates(A, B); + } + + inline bool properlyDominates(BasicBlock* A, BasicBlock* B) const { + return DT->properlyDominates(A, B); + } + + /// findNearestCommonDominator - Find nearest common dominator basic block + /// for basic block A and B. If there is no such block then return NULL. + inline BasicBlock *findNearestCommonDominator(BasicBlock *A, BasicBlock *B) { + return DT->findNearestCommonDominator(A, B); + } + + inline DomTreeNode *operator[](BasicBlock *BB) const { + return DT->getNode(BB); + } + + /// getNode - return the (Post)DominatorTree node for the specified basic + /// block. This is the same as using operator[] on this class. + /// + inline DomTreeNode *getNode(BasicBlock *BB) const { + return DT->getNode(BB); + } + + /// addNewBlock - Add a new node to the dominator tree information. This + /// creates a new node as a child of DomBB dominator node,linking it into + /// the children list of the immediate dominator. + inline DomTreeNode *addNewBlock(BasicBlock *BB, BasicBlock *DomBB) { + return DT->addNewBlock(BB, DomBB); + } + + /// changeImmediateDominator - This method is used to update the dominator + /// tree information when a node's immediate dominator changes. + /// + inline void changeImmediateDominator(BasicBlock *N, BasicBlock* NewIDom) { + DT->changeImmediateDominator(N, NewIDom); + } + + inline void changeImmediateDominator(DomTreeNode *N, DomTreeNode* NewIDom) { + DT->changeImmediateDominator(N, NewIDom); + } + + /// eraseNode - Removes a node from the dominator tree. Block must not + /// domiante any other blocks. Removes node from its immediate dominator's + /// children list. Deletes dominator node associated with basic block BB. + inline void eraseNode(BasicBlock *BB) { + DT->eraseNode(BB); + } + + /// splitBlock - BB is split and now it has one successor. Update dominator + /// tree to reflect this change. + inline void splitBlock(BasicBlock* NewBB) { + DT->splitBlock(NewBB); + } + + bool isReachableFromEntry(BasicBlock* A) { + return DT->isReachableFromEntry(A); + } + + + virtual void releaseMemory() { + DT->releaseMemory(); + } + + virtual void print(std::ostream &OS, const Module* M= 0) const { + DT->print(OS, M); + } +}; + +//===------------------------------------- +/// DominatorTree GraphTraits specialization so the DominatorTree can be +/// iterable by generic graph iterators. +/// +template <> struct GraphTraits<DomTreeNode *> { + typedef DomTreeNode NodeType; + typedef NodeType::iterator ChildIteratorType; + + static NodeType *getEntryNode(NodeType *N) { + return N; + } + static inline ChildIteratorType child_begin(NodeType* N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType* N) { + return N->end(); + } +}; + +template <> struct GraphTraits<DominatorTree*> + : public GraphTraits<DomTreeNode *> { + static NodeType *getEntryNode(DominatorTree *DT) { + return DT->getRootNode(); + } +}; + + +//===----------------------------------------------------------------------===// +/// DominanceFrontierBase - Common base class for computing forward and inverse +/// dominance frontiers for a function. 
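A client-side sketch of the DominatorTree pass above (the pass name and counting logic are invented for illustration; RegisterPass<> boilerplate is omitted):

#include "llvm/Analysis/Dominators.h"
#include "llvm/Function.h"
#include "llvm/Pass.h"

namespace {
  // Hypothetical analysis client: count how many blocks the entry block
  // dominates, i.e. how many blocks are reachable from the entry.
  struct EntryDominanceCounter : public llvm::FunctionPass {
    static char ID;
    EntryDominanceCounter() : llvm::FunctionPass(&ID) {}

    virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
      AU.addRequired<llvm::DominatorTree>();
      AU.setPreservesAll();
    }

    virtual bool runOnFunction(llvm::Function &F) {
      llvm::DominatorTree &DT = getAnalysis<llvm::DominatorTree>();
      unsigned Dominated = 0;
      for (llvm::Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
        if (DT.dominates(&F.getEntryBlock(), &*BB))
          ++Dominated;
      (void)Dominated;        // a real pass would report or consume this
      return false;           // analysis only; the IR is untouched
    }
  };
  char EntryDominanceCounter::ID = 0;
}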
+///
+class DominanceFrontierBase : public FunctionPass {
+public:
+  typedef std::set<BasicBlock*> DomSetType;    // Dom set for a bb
+  typedef std::map<BasicBlock*, DomSetType> DomSetMapType; // Dom set map
+protected:
+  DomSetMapType Frontiers;
+  std::vector<BasicBlock*> Roots;
+  const bool IsPostDominators;
+
+public:
+  DominanceFrontierBase(void *ID, bool isPostDom)
+    : FunctionPass(ID), IsPostDominators(isPostDom) {}
+
+  /// getRoots - Return the root blocks of the current CFG.  This may include
+  /// multiple blocks if we are computing post dominators.  For forward
+  /// dominators, this will always be a single block (the entry node).
+  ///
+  inline const std::vector<BasicBlock*> &getRoots() const { return Roots; }
+
+  /// isPostDominator - Returns true if this analysis is based on
+  /// postdominators.
+  ///
+  bool isPostDominator() const { return IsPostDominators; }
+
+  virtual void releaseMemory() { Frontiers.clear(); }
+
+  // Accessor interface:
+  typedef DomSetMapType::iterator iterator;
+  typedef DomSetMapType::const_iterator const_iterator;
+  iterator begin() { return Frontiers.begin(); }
+  const_iterator begin() const { return Frontiers.begin(); }
+  iterator end() { return Frontiers.end(); }
+  const_iterator end() const { return Frontiers.end(); }
+  iterator find(BasicBlock *B) { return Frontiers.find(B); }
+  const_iterator find(BasicBlock *B) const { return Frontiers.find(B); }
+
+  void addBasicBlock(BasicBlock *BB, const DomSetType &frontier) {
+    assert(find(BB) == end() && "Block already in DominanceFrontier!");
+    Frontiers.insert(std::make_pair(BB, frontier));
+  }
+
+  /// removeBlock - Remove basic block BB's frontier.
+  void removeBlock(BasicBlock *BB) {
+    assert(find(BB) != end() && "Block is not in DominanceFrontier!");
+    for (iterator I = begin(), E = end(); I != E; ++I)
+      I->second.erase(BB);
+    Frontiers.erase(BB);
+  }
+
+  void addToFrontier(iterator I, BasicBlock *Node) {
+    assert(I != end() && "BB is not in DominanceFrontier!");
+    I->second.insert(Node);
+  }
+
+  void removeFromFrontier(iterator I, BasicBlock *Node) {
+    assert(I != end() && "BB is not in DominanceFrontier!");
+    assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
+    I->second.erase(Node);
+  }
+
+  /// compareDomSet - Return false if the two domsets match. Otherwise
+  /// return true.
+  bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
+    std::set<BasicBlock *> tmpSet;
+    for (DomSetType::const_iterator I = DS2.begin(),
+           E = DS2.end(); I != E; ++I)
+      tmpSet.insert(*I);
+
+    for (DomSetType::const_iterator I = DS1.begin(),
+           E = DS1.end(); I != E; ) {
+      BasicBlock *Node = *I++;
+
+      if (tmpSet.erase(Node) == 0)
+        // Node is in DS1 but not in DS2.
+        return true;
+    }
+
+    if (!tmpSet.empty())
+      // There are nodes that are in DS2 but not in DS1.
+      return true;
+
+    // DS1 and DS2 match.
+    return false;
+  }
+
+  /// compare - Return false if the other dominance frontier base matches
+  /// this dominance frontier base. Otherwise return true.
+ bool compare(DominanceFrontierBase &Other) const { + DomSetMapType tmpFrontiers; + for (DomSetMapType::const_iterator I = Other.begin(), + E = Other.end(); I != E; ++I) + tmpFrontiers.insert(std::make_pair(I->first, I->second)); + + for (DomSetMapType::iterator I = tmpFrontiers.begin(), + E = tmpFrontiers.end(); I != E; ) { + BasicBlock *Node = I->first; + const_iterator DFI = find(Node); + if (DFI == end()) + return true; + + if (compareDomSet(I->second, DFI->second)) + return true; + + ++I; + tmpFrontiers.erase(Node); + } + + if (!tmpFrontiers.empty()) + return true; + + return false; + } + + /// print - Convert to human readable form + /// + virtual void print(std::ostream &OS, const Module* = 0) const; + void print(std::ostream *OS, const Module* M = 0) const { + if (OS) print(*OS, M); + } + virtual void dump(); +}; + + +//===------------------------------------- +/// DominanceFrontier Class - Concrete subclass of DominanceFrontierBase that is +/// used to compute a forward dominator frontiers. +/// +class DominanceFrontier : public DominanceFrontierBase { +public: + static char ID; // Pass ID, replacement for typeid + DominanceFrontier() : + DominanceFrontierBase(&ID, false) {} + + BasicBlock *getRoot() const { + assert(Roots.size() == 1 && "Should always have entry node!"); + return Roots[0]; + } + + virtual bool runOnFunction(Function &) { + Frontiers.clear(); + DominatorTree &DT = getAnalysis<DominatorTree>(); + Roots = DT.getRoots(); + assert(Roots.size() == 1 && "Only one entry block for forward domfronts!"); + calculate(DT, DT[Roots[0]]); + return false; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + AU.addRequired<DominatorTree>(); + } + + /// splitBlock - BB is split and now it has one successor. Update dominance + /// frontier to reflect this change. + void splitBlock(BasicBlock *BB); + + /// BasicBlock BB's new dominator is NewBB. Update BB's dominance frontier + /// to reflect this change. + void changeImmediateDominator(BasicBlock *BB, BasicBlock *NewBB, + DominatorTree *DT) { + // NewBB is now dominating BB. Which means BB's dominance + // frontier is now part of NewBB's dominance frontier. However, BB + // itself is not member of NewBB's dominance frontier. + DominanceFrontier::iterator NewDFI = find(NewBB); + DominanceFrontier::iterator DFI = find(BB); + // If BB was an entry block then its frontier is empty. + if (DFI == end()) + return; + DominanceFrontier::DomSetType BBSet = DFI->second; + for (DominanceFrontier::DomSetType::iterator BBSetI = BBSet.begin(), + BBSetE = BBSet.end(); BBSetI != BBSetE; ++BBSetI) { + BasicBlock *DFMember = *BBSetI; + // Insert only if NewBB dominates DFMember. + if (!DT->dominates(NewBB, DFMember)) + NewDFI->second.insert(DFMember); + } + NewDFI->second.erase(BB); + } + + const DomSetType &calculate(const DominatorTree &DT, + const DomTreeNode *Node); +}; + + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/FindUsedTypes.h b/include/llvm/Analysis/FindUsedTypes.h new file mode 100644 index 0000000..c897af3 --- /dev/null +++ b/include/llvm/Analysis/FindUsedTypes.h @@ -0,0 +1,65 @@ +//===- llvm/Analysis/FindUsedTypes.h - Find all Types in use ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This pass is used to seek out all of the types in use by the program. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_FINDUSEDTYPES_H +#define LLVM_ANALYSIS_FINDUSEDTYPES_H + +#include "llvm/Pass.h" +#include <set> + +namespace llvm { + +class Type; +class Value; + +class FindUsedTypes : public ModulePass { + std::set<const Type *> UsedTypes; +public: + static char ID; // Pass identification, replacement for typeid + FindUsedTypes() : ModulePass(&ID) {} + + /// getTypes - After the pass has been run, return the set containing all of + /// the types used in the module. + /// + const std::set<const Type *> &getTypes() const { return UsedTypes; } + + /// Print the types found in the module. If the optional Module parameter is + /// passed in, then the types are printed symbolically if possible, using the + /// symbol table from the module. + /// + void print(std::ostream &o, const Module *M) const; + void print(std::ostream *o, const Module *M) const { if (o) print(*o, M); } + +private: + /// IncorporateType - Incorporate one type and all of its subtypes into the + /// collection of used types. + /// + void IncorporateType(const Type *Ty); + + /// IncorporateValue - Incorporate all of the types used by this value. + /// + void IncorporateValue(const Value *V); + +public: + /// run - This incorporates all types used by the specified module + bool runOnModule(Module &M); + + /// getAnalysisUsage - We do not modify anything. + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h new file mode 100644 index 0000000..36ff07b --- /dev/null +++ b/include/llvm/Analysis/IVUsers.h @@ -0,0 +1,235 @@ +//===- llvm/Analysis/IVUsers.h - Induction Variable Users -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements bookkeeping for "interesting" users of expressions +// computed from induction variables. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_IVUSERS_H +#define LLVM_ANALYSIS_IVUSERS_H + +#include "llvm/Analysis/LoopPass.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include <llvm/ADT/SmallVector.h> +#include <map> + +namespace llvm { + +class DominatorTree; +class Instruction; +class Value; +class IVUsersOfOneStride; + +/// IVStrideUse - Keep track of one use of a strided induction variable, where +/// the stride is stored externally. The Offset member keeps track of the +/// offset from the IV, User is the actual user of the operand, and +/// 'OperandValToReplace' is the operand of the User that is the use. +class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> { +public: + IVStrideUse(IVUsersOfOneStride *parent, + const SCEVHandle &offset, + Instruction* U, Value *O, bool issigned) + : CallbackVH(U), Parent(parent), Offset(offset), + OperandValToReplace(O), IsSigned(issigned), + IsUseOfPostIncrementedValue(false) { + } + + /// getUser - Return the user instruction for this use. 
+  Instruction *getUser() const {
+    return cast<Instruction>(getValPtr());
+  }
+
+  /// setUser - Assign a new user instruction for this use.
+  void setUser(Instruction *NewUser) {
+    setValPtr(NewUser);
+  }
+
+  /// getParent - Return a pointer to the IVUsersOfOneStride that owns
+  /// this IVStrideUse.
+  IVUsersOfOneStride *getParent() const { return Parent; }
+
+  /// getOffset - Return the offset to add to a theoretical induction
+  /// variable that starts at zero and counts up by the stride to compute
+  /// the value for the use. This always has the same type as the stride,
+  /// which may need to be cast to match the type of the use.
+  SCEVHandle getOffset() const { return Offset; }
+
+  /// setOffset - Assign a new offset to this use.
+  void setOffset(SCEVHandle Val) {
+    Offset = Val;
+  }
+
+  /// getOperandValToReplace - Return the Value of the operand in the user
+  /// instruction that this IVStrideUse is representing.
+  Value *getOperandValToReplace() const {
+    return OperandValToReplace;
+  }
+
+  /// setOperandValToReplace - Assign a new Value as the operand value
+  /// to replace.
+  void setOperandValToReplace(Value *Op) {
+    OperandValToReplace = Op;
+  }
+
+  /// isSigned - The stride (and thus also the Offset) of this use may be in
+  /// a narrower type than the use itself (OperandValToReplace->getType()).
+  /// When this is the case, isSigned() indicates whether the IV expression
+  /// should be signed-extended instead of zero-extended to fit the type of
+  /// the use.
+  bool isSigned() const { return IsSigned; }
+
+  /// isUseOfPostIncrementedValue - True if this should use the
+  /// post-incremented version of this IV, not the preincremented version.
+  /// This can only be set in special cases, such as the terminating setcc
+  /// instruction for a loop or uses dominated by the loop.
+  bool isUseOfPostIncrementedValue() const {
+    return IsUseOfPostIncrementedValue;
+  }
+
+  /// setIsUseOfPostIncrementedValue - Set the flag that indicates whether
+  /// this is a post-increment use.
+  void setIsUseOfPostIncrementedValue(bool Val) {
+    IsUseOfPostIncrementedValue = Val;
+  }
+
+private:
+  /// Parent - a pointer to the IVUsersOfOneStride that owns this IVStrideUse.
+  IVUsersOfOneStride *Parent;
+
+  /// Offset - The offset to add to the base induction expression.
+  SCEVHandle Offset;
+
+  /// OperandValToReplace - The Value of the operand in the user instruction
+  /// that this IVStrideUse is representing.
+  WeakVH OperandValToReplace;
+
+  /// IsSigned - Determines whether the replacement value is sign or
+  /// zero extended to the type of the use.
+  bool IsSigned;
+
+  /// IsUseOfPostIncrementedValue - True if this should use the
+  /// post-incremented version of this IV, not the preincremented version.
+  bool IsUseOfPostIncrementedValue;
+
+  /// Deleted - Implementation of CallbackVH virtual function to
+  /// receive notification when the User is deleted.
+  virtual void deleted();
+};
+
+template<> struct ilist_traits<IVStrideUse>
+  : public ilist_default_traits<IVStrideUse> {
+  // createSentinel is used to get hold of a node that marks the end of
+  // the list...
+  // The sentinel is relative to this instance, so we use a non-static
+  // method.
+  IVStrideUse *createSentinel() const {
+    // since i(p)lists always publicly derive from the corresponding
+    // traits, placing a data member in this class will augment i(p)list.
+    // But since the NodeTy is expected to publicly derive from
+    // ilist_node<NodeTy>, there is a legal viable downcast from it
+    // to NodeTy.
We use this trick to superpose i(p)list with a "ghostly" + // NodeTy, which becomes the sentinel. Dereferencing the sentinel is + // forbidden (save the ilist_node<NodeTy>) so no one will ever notice + // the superposition. + return static_cast<IVStrideUse*>(&Sentinel); + } + static void destroySentinel(IVStrideUse*) {} + + IVStrideUse *provideInitialHead() const { return createSentinel(); } + IVStrideUse *ensureHead(IVStrideUse*) const { return createSentinel(); } + static void noteHead(IVStrideUse*, IVStrideUse*) {} + +private: + mutable ilist_node<IVStrideUse> Sentinel; +}; + +/// IVUsersOfOneStride - This structure keeps track of all instructions that +/// have an operand that is based on the trip count multiplied by some stride. +struct IVUsersOfOneStride : public ilist_node<IVUsersOfOneStride> { +private: + IVUsersOfOneStride(const IVUsersOfOneStride &I); // do not implement + void operator=(const IVUsersOfOneStride &I); // do not implement + +public: + IVUsersOfOneStride() : Stride(0) {} + + explicit IVUsersOfOneStride(const SCEV *stride) : Stride(stride) {} + + /// Stride - The stride for all the contained IVStrideUses. This is + /// a constant for affine strides. + const SCEV *Stride; + + /// Users - Keep track of all of the users of this stride as well as the + /// initial value and the operand that uses the IV. + ilist<IVStrideUse> Users; + + void addUser(const SCEVHandle &Offset,Instruction *User, Value *Operand, + bool isSigned) { + Users.push_back(new IVStrideUse(this, Offset, User, Operand, isSigned)); + } +}; + +class IVUsers : public LoopPass { + friend class IVStrideUserVH; + Loop *L; + LoopInfo *LI; + DominatorTree *DT; + ScalarEvolution *SE; + SmallPtrSet<Instruction*,16> Processed; + +public: + /// IVUses - A list of all tracked IV uses of induction variable expressions + /// we are interested in. + ilist<IVUsersOfOneStride> IVUses; + + /// IVUsesByStride - A mapping from the strides in StrideOrder to the + /// uses in IVUses. + std::map<SCEVHandle, IVUsersOfOneStride*> IVUsesByStride; + + /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable: + /// We use this to iterate over the IVUsesByStride collection without being + /// dependent on random ordering of pointers in the process. + SmallVector<SCEVHandle, 16> StrideOrder; + +private: + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + virtual bool runOnLoop(Loop *L, LPPassManager &LPM); + + virtual void releaseMemory(); + +public: + static char ID; // Pass ID, replacement for typeid + IVUsers(); + + /// AddUsersIfInteresting - Inspect the specified Instruction. If it is a + /// reducible SCEV, recursively add its users to the IVUsesByStride set and + /// return true. Otherwise, return false. + bool AddUsersIfInteresting(Instruction *I); + + /// getReplacementExpr - Return a SCEV expression which computes the + /// value of the OperandValToReplace of the given IVStrideUse. + SCEVHandle getReplacementExpr(const IVStrideUse &U) const; + + void print(raw_ostream &OS, const Module* = 0) const; + virtual void print(std::ostream &OS, const Module* = 0) const; + void print(std::ostream *OS, const Module* M = 0) const { + if (OS) print(*OS, M); + } + + /// dump - This method is used for debugging. 
+ void dump() const; +}; + +Pass *createIVUsersPass(); + +} + +#endif diff --git a/include/llvm/Analysis/Interval.h b/include/llvm/Analysis/Interval.h new file mode 100644 index 0000000..1da2022 --- /dev/null +++ b/include/llvm/Analysis/Interval.h @@ -0,0 +1,154 @@ +//===- llvm/Analysis/Interval.h - Interval Class Declaration ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the Interval class, which +// represents a set of CFG nodes and is a portion of an interval partition. +// +// Intervals have some interesting and useful properties, including the +// following: +// 1. The header node of an interval dominates all of the elements of the +// interval +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INTERVAL_H +#define LLVM_INTERVAL_H + +#include "llvm/ADT/GraphTraits.h" +#include <vector> +#include <iosfwd> + +namespace llvm { + +class BasicBlock; + +//===----------------------------------------------------------------------===// +// +/// Interval Class - An Interval is a set of nodes defined such that every node +/// in the interval has all of its predecessors in the interval (except for the +/// header) +/// +class Interval { + /// HeaderNode - The header BasicBlock, which dominates all BasicBlocks in this + /// interval. Also, any loops in this interval must go through the HeaderNode. + /// + BasicBlock *HeaderNode; +public: + typedef std::vector<BasicBlock*>::iterator succ_iterator; + typedef std::vector<BasicBlock*>::iterator pred_iterator; + typedef std::vector<BasicBlock*>::iterator node_iterator; + + inline Interval(BasicBlock *Header) : HeaderNode(Header) { + Nodes.push_back(Header); + } + + inline Interval(const Interval &I) // copy ctor + : HeaderNode(I.HeaderNode), Nodes(I.Nodes), Successors(I.Successors) {} + + inline BasicBlock *getHeaderNode() const { return HeaderNode; } + + /// Nodes - The basic blocks in this interval. + /// + std::vector<BasicBlock*> Nodes; + + /// Successors - List of BasicBlocks that are reachable directly from nodes in + /// this interval, but are not in the interval themselves. + /// These nodes necessarily must be header nodes for other intervals. + /// + std::vector<BasicBlock*> Successors; + + /// Predecessors - List of BasicBlocks that have this Interval's header block + /// as one of their successors. + /// + std::vector<BasicBlock*> Predecessors; + + /// contains - Find out if a basic block is in this interval + inline bool contains(BasicBlock *BB) const { + for (unsigned i = 0; i < Nodes.size(); ++i) + if (Nodes[i] == BB) return true; + return false; + // I don't want the dependency on <algorithm> + //return find(Nodes.begin(), Nodes.end(), BB) != Nodes.end(); + } + + /// isSuccessor - find out if a basic block is a successor of this Interval + inline bool isSuccessor(BasicBlock *BB) const { + for (unsigned i = 0; i < Successors.size(); ++i) + if (Successors[i] == BB) return true; + return false; + // I don't want the dependency on <algorithm> + //return find(Successors.begin(), Successors.end(), BB) != Successors.end(); + } + + /// Equality operator. It is only valid to compare two intervals from the + /// same partition, because of this, all we have to check is the header node + /// for equality. 
+ /// + inline bool operator==(const Interval &I) const { + return HeaderNode == I.HeaderNode; + } + + /// isLoop - Find out if there is a back edge in this interval... + bool isLoop() const; + + /// print - Show contents in human readable format... + void print(std::ostream &O) const; + void print(std::ostream *O) const { if (O) print(*O); } +}; + +/// succ_begin/succ_end - define methods so that Intervals may be used +/// just like BasicBlocks can with the succ_* functions, and *::succ_iterator. +/// +inline Interval::succ_iterator succ_begin(Interval *I) { + return I->Successors.begin(); +} +inline Interval::succ_iterator succ_end(Interval *I) { + return I->Successors.end(); +} + +/// pred_begin/pred_end - define methods so that Intervals may be used +/// just like BasicBlocks can with the pred_* functions, and *::pred_iterator. +/// +inline Interval::pred_iterator pred_begin(Interval *I) { + return I->Predecessors.begin(); +} +inline Interval::pred_iterator pred_end(Interval *I) { + return I->Predecessors.end(); +} + +template <> struct GraphTraits<Interval*> { + typedef Interval NodeType; + typedef Interval::succ_iterator ChildIteratorType; + + static NodeType *getEntryNode(Interval *I) { return I; } + + /// nodes_iterator/begin/end - Allow iteration over all nodes in the graph + static inline ChildIteratorType child_begin(NodeType *N) { + return succ_begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return succ_end(N); + } +}; + +template <> struct GraphTraits<Inverse<Interval*> > { + typedef Interval NodeType; + typedef Interval::pred_iterator ChildIteratorType; + static NodeType *getEntryNode(Inverse<Interval *> G) { return G.Graph; } + static inline ChildIteratorType child_begin(NodeType *N) { + return pred_begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return pred_end(N); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/IntervalIterator.h b/include/llvm/Analysis/IntervalIterator.h new file mode 100644 index 0000000..551bb72 --- /dev/null +++ b/include/llvm/Analysis/IntervalIterator.h @@ -0,0 +1,258 @@ +//===- IntervalIterator.h - Interval Iterator Declaration -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an iterator that enumerates the intervals in a control flow +// graph of some sort. This iterator is parametric, allowing iterator over the +// following types of graphs: +// +// 1. A Function* object, composed of BasicBlock nodes. +// 2. An IntervalPartition& object, composed of Interval nodes. +// +// This iterator is defined to walk the control flow graph, returning intervals +// in depth first order. These intervals are completely filled in except for +// the predecessor fields (the successor information is filled in however). +// +// By default, the intervals created by this iterator are deleted after they +// are no longer any use to the iterator. This behavior can be changed by +// passing a false value into the intervals_begin() function. This causes the +// IOwnMem member to be set, and the intervals to not be deleted. +// +// It is only safe to use this if all of the intervals are deleted by the caller +// and all of the intervals are processed. 
However, the user of the iterator is +// not allowed to modify or delete the intervals until after the iterator has +// been used completely. The IntervalPartition class uses this functionality. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INTERVAL_ITERATOR_H +#define LLVM_INTERVAL_ITERATOR_H + +#include "llvm/Analysis/IntervalPartition.h" +#include "llvm/Function.h" +#include "llvm/Support/CFG.h" +#include <stack> +#include <set> +#include <algorithm> + +namespace llvm { + +// getNodeHeader - Given a source graph node and the source graph, return the +// BasicBlock that is the header node. This is the opposite of +// getSourceGraphNode. +// +inline BasicBlock *getNodeHeader(BasicBlock *BB) { return BB; } +inline BasicBlock *getNodeHeader(Interval *I) { return I->getHeaderNode(); } + +// getSourceGraphNode - Given a BasicBlock and the source graph, return the +// source graph node that corresponds to the BasicBlock. This is the opposite +// of getNodeHeader. +// +inline BasicBlock *getSourceGraphNode(Function *, BasicBlock *BB) { + return BB; +} +inline Interval *getSourceGraphNode(IntervalPartition *IP, BasicBlock *BB) { + return IP->getBlockInterval(BB); +} + +// addNodeToInterval - This method exists to assist the generic ProcessNode +// with the task of adding a node to the new interval, depending on the +// type of the source node. In the case of a CFG source graph (BasicBlock +// case), the BasicBlock itself is added to the interval. +// +inline void addNodeToInterval(Interval *Int, BasicBlock *BB) { + Int->Nodes.push_back(BB); +} + +// addNodeToInterval - This method exists to assist the generic ProcessNode +// with the task of adding a node to the new interval, depending on the +// type of the source node. In the case of a CFG source graph (BasicBlock +// case), the BasicBlock itself is added to the interval. In the case of +// an IntervalPartition source graph (Interval case), all of the member +// BasicBlocks are added to the interval. +// +inline void addNodeToInterval(Interval *Int, Interval *I) { + // Add all of the nodes in I as new nodes in Int. 
+ copy(I->Nodes.begin(), I->Nodes.end(), back_inserter(Int->Nodes)); +} + + + + + +template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>, + class IGT = GraphTraits<Inverse<NodeTy*> > > +class IntervalIterator { + std::stack<std::pair<Interval*, typename Interval::succ_iterator> > IntStack; + std::set<BasicBlock*> Visited; + OrigContainer_t *OrigContainer; + bool IOwnMem; // If True, delete intervals when done with them + // See file header for conditions of use +public: + typedef IntervalIterator<NodeTy, OrigContainer_t> _Self; + typedef std::forward_iterator_tag iterator_category; + + IntervalIterator() {} // End iterator, empty stack + IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) { + OrigContainer = M; + if (!ProcessInterval(&M->front())) { + assert(0 && "ProcessInterval should never fail for first interval!"); + } + } + + IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) { + OrigContainer = &IP; + if (!ProcessInterval(IP.getRootInterval())) { + assert(0 && "ProcessInterval should never fail for first interval!"); + } + } + + inline ~IntervalIterator() { + if (IOwnMem) + while (!IntStack.empty()) { + delete operator*(); + IntStack.pop(); + } + } + + inline bool operator==(const _Self& x) const { return IntStack == x.IntStack;} + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline const Interval *operator*() const { return IntStack.top().first; } + inline Interval *operator*() { return IntStack.top().first; } + inline const Interval *operator->() const { return operator*(); } + inline Interval *operator->() { return operator*(); } + + _Self& operator++() { // Preincrement + assert(!IntStack.empty() && "Attempting to use interval iterator at end!"); + do { + // All of the intervals on the stack have been visited. Try visiting + // their successors now. + Interval::succ_iterator &SuccIt = IntStack.top().second, + EndIt = succ_end(IntStack.top().first); + while (SuccIt != EndIt) { // Loop over all interval succs + bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt)); + ++SuccIt; // Increment iterator + if (Done) return *this; // Found a new interval! Use it! + } + + // Free interval memory... if necessary + if (IOwnMem) delete IntStack.top().first; + + // We ran out of successors for this interval... pop off the stack + IntStack.pop(); + } while (!IntStack.empty()); + + return *this; + } + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } + +private: + // ProcessInterval - This method is used during the construction of the + // interval graph. It walks through the source graph, recursively creating + // an interval per invokation until the entire graph is covered. This uses + // the ProcessNode method to add all of the nodes to the interval. + // + // This method is templated because it may operate on two different source + // graphs: a basic block graph, or a preexisting interval graph. + // + bool ProcessInterval(NodeTy *Node) { + BasicBlock *Header = getNodeHeader(Node); + if (Visited.count(Header)) return false; + + Interval *Int = new Interval(Header); + Visited.insert(Header); // The header has now been visited! + + // Check all of our successors to see if they are in the interval... 
+ for (typename GT::ChildIteratorType I = GT::child_begin(Node), + E = GT::child_end(Node); I != E; ++I) + ProcessNode(Int, getSourceGraphNode(OrigContainer, *I)); + + IntStack.push(std::make_pair(Int, succ_begin(Int))); + return true; + } + + // ProcessNode - This method is called by ProcessInterval to add nodes to the + // interval being constructed, and it is also called recursively as it walks + // the source graph. A node is added to the current interval only if all of + // its predecessors are already in the graph. This also takes care of keeping + // the successor set of an interval up to date. + // + // This method is templated because it may operate on two different source + // graphs: a basic block graph, or a preexisting interval graph. + // + void ProcessNode(Interval *Int, NodeTy *Node) { + assert(Int && "Null interval == bad!"); + assert(Node && "Null Node == bad!"); + + BasicBlock *NodeHeader = getNodeHeader(Node); + + if (Visited.count(NodeHeader)) { // Node already been visited? + if (Int->contains(NodeHeader)) { // Already in this interval... + return; + } else { // In other interval, add as successor + if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set + Int->Successors.push_back(NodeHeader); + } + } else { // Otherwise, not in interval yet + for (typename IGT::ChildIteratorType I = IGT::child_begin(Node), + E = IGT::child_end(Node); I != E; ++I) { + if (!Int->contains(*I)) { // If pred not in interval, we can't be + if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set + Int->Successors.push_back(NodeHeader); + return; // See you later + } + } + + // If we get here, then all of the predecessors of BB are in the interval + // already. In this case, we must add BB to the interval! + addNodeToInterval(Int, Node); + Visited.insert(NodeHeader); // The node has now been visited! + + if (Int->isSuccessor(NodeHeader)) { + // If we were in the successor list from before... remove from succ list + Int->Successors.erase(std::remove(Int->Successors.begin(), + Int->Successors.end(), NodeHeader), + Int->Successors.end()); + } + + // Now that we have discovered that Node is in the interval, perhaps some + // of its successors are as well? 
+ for (typename GT::ChildIteratorType It = GT::child_begin(Node), + End = GT::child_end(Node); It != End; ++It) + ProcessNode(Int, getSourceGraphNode(OrigContainer, *It)); + } + } +}; + +typedef IntervalIterator<BasicBlock, Function> function_interval_iterator; +typedef IntervalIterator<Interval, IntervalPartition> interval_part_interval_iterator; + + +inline function_interval_iterator intervals_begin(Function *F, + bool DeleteInts = true) { + return function_interval_iterator(F, DeleteInts); +} +inline function_interval_iterator intervals_end(Function *) { + return function_interval_iterator(); +} + +inline interval_part_interval_iterator + intervals_begin(IntervalPartition &IP, bool DeleteIntervals = true) { + return interval_part_interval_iterator(IP, DeleteIntervals); +} + +inline interval_part_interval_iterator intervals_end(IntervalPartition &IP) { + return interval_part_interval_iterator(); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/IntervalPartition.h b/include/llvm/Analysis/IntervalPartition.h new file mode 100644 index 0000000..feae6d8 --- /dev/null +++ b/include/llvm/Analysis/IntervalPartition.h @@ -0,0 +1,112 @@ +//===- IntervalPartition.h - Interval partition Calculation -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the IntervalPartition class, which +// calculates and represents the interval partition of a function, or a +// preexisting interval partition. +// +// In this way, the interval partition may be used to reduce a flow graph down +// to its degenerate single node interval partition (unless it is irreducible). +// +// TODO: The IntervalPartition class should take a bool parameter that tells +// whether it should add the "tails" of an interval to an interval itself or if +// they should be represented as distinct intervals. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INTERVAL_PARTITION_H +#define LLVM_INTERVAL_PARTITION_H + +#include "llvm/Analysis/Interval.h" +#include "llvm/Pass.h" +#include <map> + +namespace llvm { + +//===----------------------------------------------------------------------===// +// +// IntervalPartition - This class builds and holds an "interval partition" for +// a function. This partition divides the control flow graph into a set of +// maximal intervals, as defined with the properties above. Intuitively, a +// BasicBlock is a (possibly nonexistent) loop with a "tail" of non looping +// nodes following it. +// +class IntervalPartition : public FunctionPass { + typedef std::map<BasicBlock*, Interval*> IntervalMapTy; + IntervalMapTy IntervalMap; + + typedef std::vector<Interval*> IntervalListTy; + Interval *RootInterval; + std::vector<Interval*> Intervals; + +public: + static char ID; // Pass identification, replacement for typeid + + IntervalPartition() : FunctionPass(&ID), RootInterval(0) {} + + // run - Calculate the interval partition for this function + virtual bool runOnFunction(Function &F); + + // IntervalPartition ctor - Build a reduced interval partition from an + // existing interval graph. This takes an additional boolean parameter to + // distinguish it from a copy constructor. Always pass in false for now. 
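+  // A sketch of the intended use (hypothetical caller code only; intermediate
+  // partitions are leaked here for brevity):
+  //
+  //   IntervalPartition *IP = &getAnalysis<IntervalPartition>();
+  //   while (!IP->isDegeneratePartition()) {
+  //     IntervalPartition *Next = new IntervalPartition(*IP, false);
+  //     if (Next->getIntervals().size() == IP->getIntervals().size())
+  //       break;   // irreducible graph: the partition stopped shrinking
+  //     IP = Next;
+  //   }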
+ // + IntervalPartition(IntervalPartition &I, bool); + + // print - Show contents in human readable format... + virtual void print(std::ostream &O, const Module* = 0) const; + void print(std::ostream *O, const Module* M = 0) const { + if (O) print(*O, M); + } + + // getRootInterval() - Return the root interval that contains the starting + // block of the function. + inline Interval *getRootInterval() { return RootInterval; } + + // isDegeneratePartition() - Returns true if the interval partition contains + // a single interval, and thus cannot be simplified anymore. + bool isDegeneratePartition() { return Intervals.size() == 1; } + + // TODO: isIrreducible - look for triangle graph. + + // getBlockInterval - Return the interval that a basic block exists in. + inline Interval *getBlockInterval(BasicBlock *BB) { + IntervalMapTy::iterator I = IntervalMap.find(BB); + return I != IntervalMap.end() ? I->second : 0; + } + + // getAnalysisUsage - Implement the Pass API + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + + // Interface to Intervals vector... + const std::vector<Interval*> &getIntervals() const { return Intervals; } + + // releaseMemory - Reset state back to before function was analyzed + void releaseMemory(); + +private: + // addIntervalToPartition - Add an interval to the internal list of intervals, + // and then add mappings from all of the basic blocks in the interval to the + // interval itself (in the IntervalMap). + // + void addIntervalToPartition(Interval *I); + + // updatePredecessors - Interval generation only sets the successor fields of + // the interval data structures. After interval generation is complete, + // run through all of the intervals and propagate successor info as + // predecessor info. + // + void updatePredecessors(Interval *Int); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/LibCallAliasAnalysis.h b/include/llvm/Analysis/LibCallAliasAnalysis.h new file mode 100644 index 0000000..ea17a23 --- /dev/null +++ b/include/llvm/Analysis/LibCallAliasAnalysis.h @@ -0,0 +1,61 @@ +//===- LibCallAliasAnalysis.h - Implement AliasAnalysis for libcalls ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the LibCallAliasAnalysis class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_LIBCALL_AA_H +#define LLVM_ANALYSIS_LIBCALL_AA_H + +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Pass.h" + +namespace llvm { + class LibCallInfo; + struct LibCallFunctionInfo; + + /// LibCallAliasAnalysis - Alias analysis driven from LibCallInfo. + struct LibCallAliasAnalysis : public FunctionPass, AliasAnalysis { + static char ID; // Class identification + + LibCallInfo *LCI; + + explicit LibCallAliasAnalysis(LibCallInfo *LC = 0) + : FunctionPass(&ID), LCI(LC) { + } + explicit LibCallAliasAnalysis(const void *ID, LibCallInfo *LC) + : FunctionPass(ID), LCI(LC) { + } + ~LibCallAliasAnalysis(); + + ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size); + + ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) { + // TODO: Could compare two direct calls against each other if we cared to. 
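+    // For now, just fall back to the base AliasAnalysis implementation.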
+ return AliasAnalysis::getModRefInfo(CS1,CS2); + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + virtual bool runOnFunction(Function &F) { + InitializeAliasAnalysis(this); // set up super class + return false; + } + + /// hasNoModRefInfoForCalls - We can provide mod/ref information against + /// non-escaping allocations. + virtual bool hasNoModRefInfoForCalls() const { return false; } + private: + ModRefResult AnalyzeLibCallDetails(const LibCallFunctionInfo *FI, + CallSite CS, Value *P, unsigned Size); + }; +} // End of llvm namespace + +#endif diff --git a/include/llvm/Analysis/LibCallSemantics.h b/include/llvm/Analysis/LibCallSemantics.h new file mode 100644 index 0000000..74e8401 --- /dev/null +++ b/include/llvm/Analysis/LibCallSemantics.h @@ -0,0 +1,166 @@ +//===- LibCallSemantics.h - Describe library semantics --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines interfaces that can be used to describe language specific +// runtime library interfaces (e.g. libc, libm, etc) to LLVM optimizers. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_LIBCALLSEMANTICS_H +#define LLVM_ANALYSIS_LIBCALLSEMANTICS_H + +#include "llvm/Analysis/AliasAnalysis.h" + +namespace llvm { + + /// LibCallLocationInfo - This struct describes a set of memory locations that + /// are accessed by libcalls. Identification of a location is doing with a + /// simple callback function. + /// + /// For example, the LibCallInfo may be set up to model the behavior of + /// standard libm functions. The location that they may be interested in is + /// an abstract location that represents errno for the current target. In + /// this case, a location for errno is anything such that the predicate + /// returns true. On Mac OS/X, this predicate would return true if the + /// pointer is the result of a call to "__error()". + /// + /// Locations can also be defined in a constant-sensitive way. For example, + /// it is possible to define a location that returns true iff it is passed + /// into the call as a specific argument. This is useful for modeling things + /// like "printf", which can store to memory, but only through pointers passed + /// with a '%n' constraint. + /// + struct LibCallLocationInfo { + // TODO: Flags: isContextSensitive etc. + + /// isLocation - Return a LocResult if the specified pointer refers to this + /// location for the specified call site. This returns "Yes" if we can tell + /// that the pointer *does definitely* refer to the location, "No" if we can + /// tell that the location *definitely does not* refer to the location, and + /// returns "Unknown" if we cannot tell for certain. + enum LocResult { + Yes, No, Unknown + }; + LocResult (*isLocation)(CallSite CS, const Value *Ptr, unsigned Size); + }; + + /// LibCallFunctionInfo - Each record in the array of FunctionInfo structs + /// records the behavior of one libcall that is known by the optimizer. This + /// captures things like the side effects of the call. Side effects are + /// modeled both universally (in the readnone/readonly) sense, but also + /// potentially against a set of abstract locations defined by the optimizer. + /// This allows an optimizer to define that some libcall (e.g. 
sqrt) is + /// side-effect free except that it might modify errno (thus, the call is + /// *not* universally readonly). Or it might say that the side effects + /// are unknown other than to say that errno is not modified. + /// + struct LibCallFunctionInfo { + /// Name - This is the name of the libcall this describes. + const char *Name; + + /// TODO: Constant folding function: Constant* vector -> Constant*. + + /// UniversalBehavior - This captures the absolute mod/ref behavior without + /// any specific context knowledge. For example, if the function is known + /// to be readonly, this would be set to 'ref'. If known to be readnone, + /// this is set to NoModRef. + AliasAnalysis::ModRefResult UniversalBehavior; + + /// LocationMRInfo - This pair captures info about whether a specific + /// location is modified or referenced by a libcall. + struct LocationMRInfo { + /// LocationID - ID # of the accessed location or ~0U for array end. + unsigned LocationID; + /// MRInfo - Mod/Ref info for this location. + AliasAnalysis::ModRefResult MRInfo; + }; + + /// DetailsType - Indicate the sense of the LocationDetails array. This + /// controls how the LocationDetails array is interpreted. + enum { + /// DoesOnly - If DetailsType is set to DoesOnly, then we know that the + /// *only* mod/ref behavior of this function is captured by the + /// LocationDetails array. If we are trying to say that 'sqrt' can only + /// modify errno, we'd have the {errnoloc,mod} in the LocationDetails + /// array and have DetailsType set to DoesOnly. + DoesOnly, + + /// DoesNot - If DetailsType is set to DoesNot, then the sense of the + /// LocationDetails array is completely inverted. This means that we *do + /// not* know everything about the side effects of this libcall, but we do + /// know things that the libcall cannot do. This is useful for complex + /// functions like 'ctime' which have crazy mod/ref behavior, but are + /// known to never read or write errno. In this case, we'd have + /// {errnoloc,modref} in the LocationDetails array and DetailsType would + /// be set to DoesNot, indicating that ctime does not read or write the + /// errno location. + DoesNot + } DetailsType; + + /// LocationDetails - This is a pointer to an array of LocationMRInfo + /// structs which indicates the behavior of the libcall w.r.t. specific + /// locations. For example, if this libcall is known to only modify + /// 'errno', it would have a LocationDetails array with the errno ID and + /// 'mod' in it. See the DetailsType field for how this is interpreted. + /// + /// In the "DoesOnly" case, this information is 'may' information for: there + /// is no guarantee that the specified side effect actually does happen, + /// just that it could. In the "DoesNot" case, this is 'must not' info. + /// + /// If this pointer is null, no details are known. + /// + const LocationMRInfo *LocationDetails; + }; + + + /// LibCallInfo - Abstract interface to query about library call information. + /// Instances of this class return known information about some set of + /// libcalls. + /// + class LibCallInfo { + // Implementation details of this object, private. + mutable void *Impl; + mutable const LibCallLocationInfo *Locations; + mutable unsigned NumLocations; + public: + LibCallInfo() : Impl(0), Locations(0), NumLocations(0) {} + virtual ~LibCallInfo(); + + //===------------------------------------------------------------------===// + // Accessor Methods: Efficient access to contained data. 
+ //===------------------------------------------------------------------===// + + /// getLocationInfo - Return information about the specified LocationID. + const LibCallLocationInfo &getLocationInfo(unsigned LocID) const; + + + /// getFunctionInfo - Return the LibCallFunctionInfo object corresponding to + /// the specified function if we have it. If not, return null. + const LibCallFunctionInfo *getFunctionInfo(Function *F) const; + + + //===------------------------------------------------------------------===// + // Implementation Methods: Subclasses should implement these. + //===------------------------------------------------------------------===// + + /// getLocationInfo - Return descriptors for the locations referenced by + /// this set of libcalls. + virtual unsigned getLocationInfo(const LibCallLocationInfo *&Array) const { + return 0; + } + + /// getFunctionInfoArray - Return an array of descriptors that describe the + /// set of libcalls represented by this LibCallInfo object. This array is + /// terminated by an entry with a NULL name. + virtual const LibCallFunctionInfo *getFunctionInfoArray() const = 0; + }; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Analysis/LiveValues.h b/include/llvm/Analysis/LiveValues.h new file mode 100644 index 0000000..31b00d7 --- /dev/null +++ b/include/llvm/Analysis/LiveValues.h @@ -0,0 +1,103 @@ +//===- LiveValues.h - Liveness information for LLVM IR Values. ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface for the LLVM IR Value liveness +// analysis pass. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_LIVEVALUES_H +#define LLVM_ANALYSIS_LIVEVALUES_H + +#include "llvm/Pass.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" + +namespace llvm { + +class DominatorTree; +class LoopInfo; +class Value; + +/// LiveValues - Analysis that provides liveness information for +/// LLVM IR Values. +/// +class LiveValues : public FunctionPass { + DominatorTree *DT; + LoopInfo *LI; + + /// Memo - A bunch of state to be associated with a value. + /// + struct Memo { + /// Used - The set of blocks which contain a use of the value. + /// + SmallPtrSet<const BasicBlock *, 4> Used; + + /// LiveThrough - A conservative approximation of the set of blocks in + /// which the value is live-through, meaning blocks properly dominated + /// by the definition, and from which blocks containing uses of the + /// value are reachable. + /// + SmallPtrSet<const BasicBlock *, 4> LiveThrough; + + /// Killed - A conservative approximation of the set of blocks in which + /// the value is used and not live-out. + /// + SmallPtrSet<const BasicBlock *, 4> Killed; + }; + + /// Memos - Remembers the Memo for each Value. This is populated on + /// demand. + /// + DenseMap<const Value *, Memo> Memos; + + /// getMemo - Retrieve an existing Memo for the given value if one + /// is available, otherwise compute a new one. + /// + Memo &getMemo(const Value *V); + + /// compute - Compute a new Memo for the given value. 
+  ///
+  Memo &compute(const Value *V);
+
+public:
+  static char ID;
+  LiveValues();
+
+  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+  virtual bool runOnFunction(Function &F);
+  virtual void releaseMemory();
+
+  /// isUsedInBlock - Test if the given value is used in the given block.
+  ///
+  bool isUsedInBlock(const Value *V, const BasicBlock *BB);
+
+  /// isLiveThroughBlock - Test if the given value is known to be
+  /// live-through the given block, meaning that the block is properly
+  /// dominated by the value's definition, and there exists a block
+  /// reachable from it that contains a use. This uses a conservative
+  /// approximation that errs on the side of returning false.
+  ///
+  bool isLiveThroughBlock(const Value *V, const BasicBlock *BB);
+
+  /// isKilledInBlock - Test if the given value is known to be killed in
+  /// the given block, meaning that the block contains a use of the value,
+  /// and no blocks reachable from the block contain a use. This uses a
+  /// conservative approximation that errs on the side of returning false.
+  ///
+  bool isKilledInBlock(const Value *V, const BasicBlock *BB);
+};
+
+/// createLiveValuesPass - This creates an instance of the LiveValues pass.
+///
+FunctionPass *createLiveValuesPass();
+
+}
+
+#endif
diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h
new file mode 100644
index 0000000..fb0b584
--- /dev/null
+++ b/include/llvm/Analysis/LoopInfo.h
@@ -0,0 +1,1095 @@
+//===- llvm/Analysis/LoopInfo.h - Natural Loop Calculator -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoopInfo class that is used to identify natural loops
+// and determine the loop depth of various nodes of the CFG.  Note that natural
+// loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function.  For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the
+// loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+//  * whether there is a preheader for the loop
+//  * the number of back edges to the header
+//  * whether or not a particular block branches out of the loop
+//  * the successor blocks of the loop
+//  * the loop depth
+//  * the trip count
+//  * etc...
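+//
+// As a rough, hypothetical sketch of typical use (assuming a pass that has
+// already required LoopInfo, and that BB is some BasicBlock* in the function):
+//
+//   LoopInfo &LI = getAnalysis<LoopInfo>();
+//   if (Loop *L = LI.getLoopFor(BB)) {              // innermost loop, if any
+//     unsigned Depth = L->getLoopDepth();           // 1 for an outer-most loop
+//     BasicBlock *Preheader = L->getLoopPreheader();// may be null
+//   }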
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_LOOP_INFO_H +#define LLVM_ANALYSIS_LOOP_INFO_H + +#include "llvm/Pass.h" +#include "llvm/Constants.h" +#include "llvm/Instructions.h" +#include "llvm/ADT/DepthFirstIterator.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/Streams.h" +#include <algorithm> +#include <ostream> + +namespace llvm { + +template<typename T> +static void RemoveFromVector(std::vector<T*> &V, T *N) { + typename std::vector<T*>::iterator I = std::find(V.begin(), V.end(), N); + assert(I != V.end() && "N is not in this list!"); + V.erase(I); +} + +class DominatorTree; +class LoopInfo; +template<class N> class LoopInfoBase; +template<class N> class LoopBase; + +typedef LoopBase<BasicBlock> Loop; + +//===----------------------------------------------------------------------===// +/// LoopBase class - Instances of this class are used to represent loops that +/// are detected in the flow graph +/// +template<class BlockT> +class LoopBase { + LoopBase<BlockT> *ParentLoop; + // SubLoops - Loops contained entirely within this one. + std::vector<LoopBase<BlockT>*> SubLoops; + + // Blocks - The list of blocks in this loop. First entry is the header node. + std::vector<BlockT*> Blocks; + + LoopBase(const LoopBase<BlockT> &); // DO NOT IMPLEMENT + const LoopBase<BlockT>&operator=(const LoopBase<BlockT> &);// DO NOT IMPLEMENT +public: + /// Loop ctor - This creates an empty loop. + LoopBase() : ParentLoop(0) {} + ~LoopBase() { + for (size_t i = 0, e = SubLoops.size(); i != e; ++i) + delete SubLoops[i]; + } + + /// getLoopDepth - Return the nesting level of this loop. An outer-most + /// loop has depth 1, for consistency with loop depth values used for basic + /// blocks, where depth 0 is used for blocks not inside any loops. + unsigned getLoopDepth() const { + unsigned D = 1; + for (const LoopBase<BlockT> *CurLoop = ParentLoop; CurLoop; + CurLoop = CurLoop->ParentLoop) + ++D; + return D; + } + BlockT *getHeader() const { return Blocks.front(); } + LoopBase<BlockT> *getParentLoop() const { return ParentLoop; } + + /// contains - Return true if the specified basic block is in this loop + /// + bool contains(const BlockT *BB) const { + return std::find(block_begin(), block_end(), BB) != block_end(); + } + + /// iterator/begin/end - Return the loops contained entirely within this loop. + /// + const std::vector<LoopBase<BlockT>*> &getSubLoops() const { return SubLoops; } + typedef typename std::vector<LoopBase<BlockT>*>::const_iterator iterator; + iterator begin() const { return SubLoops.begin(); } + iterator end() const { return SubLoops.end(); } + bool empty() const { return SubLoops.empty(); } + + /// getBlocks - Get a list of the basic blocks which make up this loop. + /// + const std::vector<BlockT*> &getBlocks() const { return Blocks; } + typedef typename std::vector<BlockT*>::const_iterator block_iterator; + block_iterator block_begin() const { return Blocks.begin(); } + block_iterator block_end() const { return Blocks.end(); } + + /// isLoopExit - True if terminator in the block can branch to another block + /// that is outside of the current loop. 
+ /// + bool isLoopExit(const BlockT *BB) const { + typedef GraphTraits<BlockT*> BlockTraits; + for (typename BlockTraits::ChildIteratorType SI = + BlockTraits::child_begin(const_cast<BlockT*>(BB)), + SE = BlockTraits::child_end(const_cast<BlockT*>(BB)); SI != SE; ++SI) { + if (!contains(*SI)) + return true; + } + return false; + } + + /// getNumBackEdges - Calculate the number of back edges to the loop header + /// + unsigned getNumBackEdges() const { + unsigned NumBackEdges = 0; + BlockT *H = getHeader(); + + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + for (typename InvBlockTraits::ChildIteratorType I = + InvBlockTraits::child_begin(const_cast<BlockT*>(H)), + E = InvBlockTraits::child_end(const_cast<BlockT*>(H)); I != E; ++I) + if (contains(*I)) + ++NumBackEdges; + + return NumBackEdges; + } + + /// isLoopInvariant - Return true if the specified value is loop invariant + /// + inline bool isLoopInvariant(Value *V) const { + if (Instruction *I = dyn_cast<Instruction>(V)) + return !contains(I->getParent()); + return true; // All non-instructions are loop invariant + } + + //===--------------------------------------------------------------------===// + // APIs for simple analysis of the loop. + // + // Note that all of these methods can fail on general loops (ie, there may not + // be a preheader, etc). For best success, the loop simplification and + // induction variable canonicalization pass should be used to normalize loops + // for easy analysis. These methods assume canonical loops. + + /// getExitingBlocks - Return all blocks inside the loop that have successors + /// outside of the loop. These are the blocks _inside of the current loop_ + /// which branch out. The returned list is always unique. + /// + void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const { + // Sort the blocks vector so that we can use binary search to do quick + // lookups. + SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end()); + std::sort(LoopBBs.begin(), LoopBBs.end()); + + typedef GraphTraits<BlockT*> BlockTraits; + for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) + for (typename BlockTraits::ChildIteratorType I = + BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI); + I != E; ++I) + if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) { + // Not in current loop? It must be an exit block. + ExitingBlocks.push_back(*BI); + break; + } + } + + /// getExitingBlock - If getExitingBlocks would return exactly one block, + /// return that block. Otherwise return null. + BlockT *getExitingBlock() const { + SmallVector<BlockT*, 8> ExitingBlocks; + getExitingBlocks(ExitingBlocks); + if (ExitingBlocks.size() == 1) + return ExitingBlocks[0]; + return 0; + } + + /// getExitBlocks - Return all of the successor blocks of this loop. These + /// are the blocks _outside of the current loop_ which are branched to. + /// + void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const { + // Sort the blocks vector so that we can use binary search to do quick + // lookups. + SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end()); + std::sort(LoopBBs.begin(), LoopBBs.end()); + + typedef GraphTraits<BlockT*> BlockTraits; + for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) + for (typename BlockTraits::ChildIteratorType I = + BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI); + I != E; ++I) + if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) + // Not in current loop? It must be an exit block. 
+ ExitBlocks.push_back(*I); + } + + /// getExitBlock - If getExitBlocks would return exactly one block, + /// return that block. Otherwise return null. + BlockT *getExitBlock() const { + SmallVector<BlockT*, 8> ExitBlocks; + getExitBlocks(ExitBlocks); + if (ExitBlocks.size() == 1) + return ExitBlocks[0]; + return 0; + } + + /// getUniqueExitBlocks - Return all unique successor blocks of this loop. + /// These are the blocks _outside of the current loop_ which are branched to. + /// This assumes that loop is in canonical form. + /// + void getUniqueExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const { + // Sort the blocks vector so that we can use binary search to do quick + // lookups. + SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end()); + std::sort(LoopBBs.begin(), LoopBBs.end()); + + std::vector<BlockT*> switchExitBlocks; + + for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) { + + BlockT *current = *BI; + switchExitBlocks.clear(); + + typedef GraphTraits<BlockT*> BlockTraits; + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + for (typename BlockTraits::ChildIteratorType I = + BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI); + I != E; ++I) { + if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) + // If block is inside the loop then it is not a exit block. + continue; + + typename InvBlockTraits::ChildIteratorType PI = + InvBlockTraits::child_begin(*I); + BlockT *firstPred = *PI; + + // If current basic block is this exit block's first predecessor + // then only insert exit block in to the output ExitBlocks vector. + // This ensures that same exit block is not inserted twice into + // ExitBlocks vector. + if (current != firstPred) + continue; + + // If a terminator has more then two successors, for example SwitchInst, + // then it is possible that there are multiple edges from current block + // to one exit block. + if (std::distance(BlockTraits::child_begin(current), + BlockTraits::child_end(current)) <= 2) { + ExitBlocks.push_back(*I); + continue; + } + + // In case of multiple edges from current block to exit block, collect + // only one edge in ExitBlocks. Use switchExitBlocks to keep track of + // duplicate edges. + if (std::find(switchExitBlocks.begin(), switchExitBlocks.end(), *I) + == switchExitBlocks.end()) { + switchExitBlocks.push_back(*I); + ExitBlocks.push_back(*I); + } + } + } + } + + /// getLoopPreheader - If there is a preheader for this loop, return it. A + /// loop has a preheader if there is only one edge to the header of the loop + /// from outside of the loop. If this is the case, the block branching to the + /// header of the loop is the preheader node. + /// + /// This method returns null if there is no preheader for the loop. + /// + BlockT *getLoopPreheader() const { + // Keep track of nodes outside the loop branching to the header... + BlockT *Out = 0; + + // Loop over the predecessors of the header node... + BlockT *Header = getHeader(); + typedef GraphTraits<BlockT*> BlockTraits; + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + for (typename InvBlockTraits::ChildIteratorType PI = + InvBlockTraits::child_begin(Header), + PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) + if (!contains(*PI)) { // If the block is not in the loop... + if (Out && Out != *PI) + return 0; // Multiple predecessors outside the loop + Out = *PI; + } + + // Make sure there is only one exit out of the preheader. 
+ assert(Out && "Header of loop has no predecessors from outside loop?"); + typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out); + ++SI; + if (SI != BlockTraits::child_end(Out)) + return 0; // Multiple exits from the block, must not be a preheader. + + // If there is exactly one preheader, return it. If there was zero, then + // Out is still null. + return Out; + } + + /// getLoopLatch - If there is a single latch block for this loop, return it. + /// A latch block is a block that contains a branch back to the header. + /// A loop header in normal form has two edges into it: one from a preheader + /// and one from a latch block. + BlockT *getLoopLatch() const { + BlockT *Header = getHeader(); + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + typename InvBlockTraits::ChildIteratorType PI = + InvBlockTraits::child_begin(Header); + typename InvBlockTraits::ChildIteratorType PE = + InvBlockTraits::child_end(Header); + if (PI == PE) return 0; // no preds? + + BlockT *Latch = 0; + if (contains(*PI)) + Latch = *PI; + ++PI; + if (PI == PE) return 0; // only one pred? + + if (contains(*PI)) { + if (Latch) return 0; // multiple backedges + Latch = *PI; + } + ++PI; + if (PI != PE) return 0; // more than two preds + + return Latch; + } + + /// getCanonicalInductionVariable - Check to see if the loop has a canonical + /// induction variable: an integer recurrence that starts at 0 and increments + /// by one each time through the loop. If so, return the phi node that + /// corresponds to it. + /// + /// The IndVarSimplify pass transforms loops to have a canonical induction + /// variable. + /// + inline PHINode *getCanonicalInductionVariable() const { + BlockT *H = getHeader(); + + BlockT *Incoming = 0, *Backedge = 0; + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + typename InvBlockTraits::ChildIteratorType PI = + InvBlockTraits::child_begin(H); + assert(PI != InvBlockTraits::child_end(H) && + "Loop must have at least one backedge!"); + Backedge = *PI++; + if (PI == InvBlockTraits::child_end(H)) return 0; // dead loop + Incoming = *PI++; + if (PI != InvBlockTraits::child_end(H)) return 0; // multiple backedges? + + if (contains(Incoming)) { + if (contains(Backedge)) + return 0; + std::swap(Incoming, Backedge); + } else if (!contains(Backedge)) + return 0; + + // Loop over all of the PHI nodes, looking for a canonical indvar. + for (typename BlockT::iterator I = H->begin(); isa<PHINode>(I); ++I) { + PHINode *PN = cast<PHINode>(I); + if (ConstantInt *CI = + dyn_cast<ConstantInt>(PN->getIncomingValueForBlock(Incoming))) + if (CI->isNullValue()) + if (Instruction *Inc = + dyn_cast<Instruction>(PN->getIncomingValueForBlock(Backedge))) + if (Inc->getOpcode() == Instruction::Add && + Inc->getOperand(0) == PN) + if (ConstantInt *CI = dyn_cast<ConstantInt>(Inc->getOperand(1))) + if (CI->equalsInt(1)) + return PN; + } + return 0; + } + + /// getCanonicalInductionVariableIncrement - Return the LLVM value that holds + /// the canonical induction variable value for the "next" iteration of the + /// loop. This always succeeds if getCanonicalInductionVariable succeeds. + /// + inline Instruction *getCanonicalInductionVariableIncrement() const { + if (PHINode *PN = getCanonicalInductionVariable()) { + bool P1InLoop = contains(PN->getIncomingBlock(1)); + return cast<Instruction>(PN->getIncomingValue(P1InLoop)); + } + return 0; + } + + /// getTripCount - Return a loop-invariant LLVM value indicating the number of + /// times the loop will be executed. 
Note that this means that the backedge + /// of the loop executes N-1 times. If the trip-count cannot be determined, + /// this returns null. + /// + /// The IndVarSimplify pass transforms loops to have a form that this + /// function easily understands. + /// + inline Value *getTripCount() const { + // Canonical loops will end with a 'cmp ne I, V', where I is the incremented + // canonical induction variable and V is the trip count of the loop. + Instruction *Inc = getCanonicalInductionVariableIncrement(); + if (Inc == 0) return 0; + PHINode *IV = cast<PHINode>(Inc->getOperand(0)); + + BlockT *BackedgeBlock = + IV->getIncomingBlock(contains(IV->getIncomingBlock(1))); + + if (BranchInst *BI = dyn_cast<BranchInst>(BackedgeBlock->getTerminator())) + if (BI->isConditional()) { + if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) { + if (ICI->getOperand(0) == Inc) { + if (BI->getSuccessor(0) == getHeader()) { + if (ICI->getPredicate() == ICmpInst::ICMP_NE) + return ICI->getOperand(1); + } else if (ICI->getPredicate() == ICmpInst::ICMP_EQ) { + return ICI->getOperand(1); + } + } + } + } + + return 0; + } + + /// getSmallConstantTripCount - Returns the trip count of this loop as a + /// normal unsigned value, if possible. Returns 0 if the trip count is unknown + /// of not constant. Will also return 0 if the trip count is very large + /// (>= 2^32) + inline unsigned getSmallConstantTripCount() const { + Value* TripCount = this->getTripCount(); + if (TripCount) { + if (ConstantInt *TripCountC = dyn_cast<ConstantInt>(TripCount)) { + // Guard against huge trip counts. + if (TripCountC->getValue().getActiveBits() <= 32) { + return (unsigned)TripCountC->getZExtValue(); + } + } + } + return 0; + } + + /// getSmallConstantTripMultiple - Returns the largest constant divisor of the + /// trip count of this loop as a normal unsigned value, if possible. This + /// means that the actual trip count is always a multiple of the returned + /// value (don't forget the trip count could very well be zero as well!). + /// + /// Returns 1 if the trip count is unknown or not guaranteed to be the + /// multiple of a constant (which is also the case if the trip count is simply + /// constant, use getSmallConstantTripCount for that case), Will also return 1 + /// if the trip count is very large (>= 2^32). + inline unsigned getSmallConstantTripMultiple() const { + Value* TripCount = this->getTripCount(); + // This will hold the ConstantInt result, if any + ConstantInt *Result = NULL; + if (TripCount) { + // See if the trip count is constant itself + Result = dyn_cast<ConstantInt>(TripCount); + // if not, see if it is a multiplication + if (!Result) + if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TripCount)) { + switch (BO->getOpcode()) { + case BinaryOperator::Mul: + Result = dyn_cast<ConstantInt>(BO->getOperand(1)); + break; + default: + break; + } + } + } + // Guard against huge trip counts. + if (Result && Result->getValue().getActiveBits() <= 32) { + return (unsigned)Result->getZExtValue(); + } else { + return 1; + } + } + + /// isLCSSAForm - Return true if the Loop is in LCSSA form + inline bool isLCSSAForm() const { + // Sort the blocks vector so that we can use binary search to do quick + // lookups. 
+ SmallPtrSet<BlockT*, 16> LoopBBs(block_begin(), block_end()); + + for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) { + BlockT *BB = *BI; + for (typename BlockT::iterator I = BB->begin(), E = BB->end(); I != E;++I) + for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; + ++UI) { + BlockT *UserBB = cast<Instruction>(*UI)->getParent(); + if (PHINode *P = dyn_cast<PHINode>(*UI)) { + UserBB = P->getIncomingBlock(UI); + } + + // Check the current block, as a fast-path. Most values are used in + // the same block they are defined in. + if (UserBB != BB && !LoopBBs.count(UserBB)) + return false; + } + } + + return true; + } + + //===--------------------------------------------------------------------===// + // APIs for updating loop information after changing the CFG + // + + /// addBasicBlockToLoop - This method is used by other analyses to update loop + /// information. NewBB is set to be a new member of the current loop. + /// Because of this, it is added as a member of all parent loops, and is added + /// to the specified LoopInfo object as being in the current basic block. It + /// is not valid to replace the loop header with this method. + /// + void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT> &LI); + + /// replaceChildLoopWith - This is used when splitting loops up. It replaces + /// the OldChild entry in our children list with NewChild, and updates the + /// parent pointer of OldChild to be null and the NewChild to be this loop. + /// This updates the loop depth of the new child. + void replaceChildLoopWith(LoopBase<BlockT> *OldChild, + LoopBase<BlockT> *NewChild) { + assert(OldChild->ParentLoop == this && "This loop is already broken!"); + assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!"); + typename std::vector<LoopBase<BlockT>*>::iterator I = + std::find(SubLoops.begin(), SubLoops.end(), OldChild); + assert(I != SubLoops.end() && "OldChild not in loop!"); + *I = NewChild; + OldChild->ParentLoop = 0; + NewChild->ParentLoop = this; + } + + /// addChildLoop - Add the specified loop to be a child of this loop. This + /// updates the loop depth of the new child. + /// + void addChildLoop(LoopBase<BlockT> *NewChild) { + assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!"); + NewChild->ParentLoop = this; + SubLoops.push_back(NewChild); + } + + /// removeChildLoop - This removes the specified child from being a subloop of + /// this loop. The loop is not deleted, as it will presumably be inserted + /// into another loop. + LoopBase<BlockT> *removeChildLoop(iterator I) { + assert(I != SubLoops.end() && "Cannot remove end iterator!"); + LoopBase<BlockT> *Child = *I; + assert(Child->ParentLoop == this && "Child is not a child of this loop!"); + SubLoops.erase(SubLoops.begin()+(I-begin())); + Child->ParentLoop = 0; + return Child; + } + + /// addBlockEntry - This adds a basic block directly to the basic block list. + /// This should only be used by transformations that create new loops. Other + /// transformations should use addBasicBlockToLoop. + void addBlockEntry(BlockT *BB) { + Blocks.push_back(BB); + } + + /// moveToHeader - This method is used to move BB (which must be part of this + /// loop) to be the loop header of the loop (the block that dominates all + /// others). 
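As a non-normative illustration of the simple-analysis helpers above (canonical induction variable and trip-count queries), the sketch below assumes a loop already normalized by the loop-simplification and induction-variable canonicalization passes, as those comments require; the function name reportTripInfo is invented for the example.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Instructions.h"
#include "llvm/Support/Streams.h"

using namespace llvm;

// Illustrative sketch only: query the canonical induction variable and the
// trip count of a canonicalized loop, using the helpers declared above.
static bool reportTripInfo(const Loop *L) {
  PHINode *IndVar = L->getCanonicalInductionVariable();
  if (!IndVar)
    return false;  // No 0,+1 induction variable; the loop is not canonical.
  cerr << "canonical induction variable: " << IndVar->getName() << "\n";

  if (L->getTripCount())
    cerr << "loop has a loop-invariant trip count value\n";

  // Small constant trip counts (< 2^32) come back directly as an unsigned;
  // otherwise we may still know a constant divisor of the trip count.
  if (unsigned N = L->getSmallConstantTripCount())
    cerr << "trip count is exactly " << N << "\n";
  else
    cerr << "trip count is a multiple of "
         << L->getSmallConstantTripMultiple() << "\n";
  return true;
}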
+ void moveToHeader(BlockT *BB) { + if (Blocks[0] == BB) return; + for (unsigned i = 0; ; ++i) { + assert(i != Blocks.size() && "Loop does not contain BB!"); + if (Blocks[i] == BB) { + Blocks[i] = Blocks[0]; + Blocks[0] = BB; + return; + } + } + } + + /// removeBlockFromLoop - This removes the specified basic block from the + /// current loop, updating the Blocks as appropriate. This does not update + /// the mapping in the LoopInfo class. + void removeBlockFromLoop(BlockT *BB) { + RemoveFromVector(Blocks, BB); + } + + /// verifyLoop - Verify loop structure + void verifyLoop() const { +#ifndef NDEBUG + assert (getHeader() && "Loop header is missing"); + assert (getLoopPreheader() && "Loop preheader is missing"); + assert (getLoopLatch() && "Loop latch is missing"); + for (iterator I = SubLoops.begin(), E = SubLoops.end(); I != E; ++I) + (*I)->verifyLoop(); +#endif + } + + void print(std::ostream &OS, unsigned Depth = 0) const { + OS << std::string(Depth*2, ' ') << "Loop at depth " << getLoopDepth() + << " containing: "; + + for (unsigned i = 0; i < getBlocks().size(); ++i) { + if (i) OS << ","; + BlockT *BB = getBlocks()[i]; + WriteAsOperand(OS, BB, false); + if (BB == getHeader()) OS << "<header>"; + if (BB == getLoopLatch()) OS << "<latch>"; + if (isLoopExit(BB)) OS << "<exit>"; + } + OS << "\n"; + + for (iterator I = begin(), E = end(); I != E; ++I) + (*I)->print(OS, Depth+2); + } + + void print(std::ostream *O, unsigned Depth = 0) const { + if (O) print(*O, Depth); + } + + void dump() const { + print(cerr); + } + +private: + friend class LoopInfoBase<BlockT>; + explicit LoopBase(BlockT *BB) : ParentLoop(0) { + Blocks.push_back(BB); + } +}; + + +//===----------------------------------------------------------------------===// +/// LoopInfo - This class builds and contains all of the top level loop +/// structures in the specified function. +/// + +template<class BlockT> +class LoopInfoBase { + // BBMap - Mapping of basic blocks to the inner most loop they occur in + std::map<BlockT*, LoopBase<BlockT>*> BBMap; + std::vector<LoopBase<BlockT>*> TopLevelLoops; + friend class LoopBase<BlockT>; + +public: + LoopInfoBase() { } + ~LoopInfoBase() { releaseMemory(); } + + void releaseMemory() { + for (typename std::vector<LoopBase<BlockT>* >::iterator I = + TopLevelLoops.begin(), E = TopLevelLoops.end(); I != E; ++I) + delete *I; // Delete all of the loops... + + BBMap.clear(); // Reset internal state of analysis + TopLevelLoops.clear(); + } + + /// iterator/begin/end - The interface to the top-level loops in the current + /// function. + /// + typedef typename std::vector<LoopBase<BlockT>*>::const_iterator iterator; + iterator begin() const { return TopLevelLoops.begin(); } + iterator end() const { return TopLevelLoops.end(); } + bool empty() const { return TopLevelLoops.empty(); } + + /// getLoopFor - Return the inner most loop that BB lives in. If a basic + /// block is in no loop (for example the entry node), null is returned. + /// + LoopBase<BlockT> *getLoopFor(const BlockT *BB) const { + typename std::map<BlockT *, LoopBase<BlockT>*>::const_iterator I= + BBMap.find(const_cast<BlockT*>(BB)); + return I != BBMap.end() ? I->second : 0; + } + + /// operator[] - same as getLoopFor... + /// + const LoopBase<BlockT> *operator[](const BlockT *BB) const { + return getLoopFor(BB); + } + + /// getLoopDepth - Return the loop nesting level of the specified block. A + /// depth of 0 means the block is not inside any loop. 
+ /// + unsigned getLoopDepth(const BlockT *BB) const { + const LoopBase<BlockT> *L = getLoopFor(BB); + return L ? L->getLoopDepth() : 0; + } + + // isLoopHeader - True if the block is a loop header node + bool isLoopHeader(BlockT *BB) const { + const LoopBase<BlockT> *L = getLoopFor(BB); + return L && L->getHeader() == BB; + } + + /// removeLoop - This removes the specified top-level loop from this loop info + /// object. The loop is not deleted, as it will presumably be inserted into + /// another loop. + LoopBase<BlockT> *removeLoop(iterator I) { + assert(I != end() && "Cannot remove end iterator!"); + LoopBase<BlockT> *L = *I; + assert(L->getParentLoop() == 0 && "Not a top-level loop!"); + TopLevelLoops.erase(TopLevelLoops.begin() + (I-begin())); + return L; + } + + /// changeLoopFor - Change the top-level loop that contains BB to the + /// specified loop. This should be used by transformations that restructure + /// the loop hierarchy tree. + void changeLoopFor(BlockT *BB, LoopBase<BlockT> *L) { + LoopBase<BlockT> *&OldLoop = BBMap[BB]; + assert(OldLoop && "Block not in a loop yet!"); + OldLoop = L; + } + + /// changeTopLevelLoop - Replace the specified loop in the top-level loops + /// list with the indicated loop. + void changeTopLevelLoop(LoopBase<BlockT> *OldLoop, + LoopBase<BlockT> *NewLoop) { + typename std::vector<LoopBase<BlockT>*>::iterator I = + std::find(TopLevelLoops.begin(), TopLevelLoops.end(), OldLoop); + assert(I != TopLevelLoops.end() && "Old loop not at top level!"); + *I = NewLoop; + assert(NewLoop->ParentLoop == 0 && OldLoop->ParentLoop == 0 && + "Loops already embedded into a subloop!"); + } + + /// addTopLevelLoop - This adds the specified loop to the collection of + /// top-level loops. + void addTopLevelLoop(LoopBase<BlockT> *New) { + assert(New->getParentLoop() == 0 && "Loop already in subloop!"); + TopLevelLoops.push_back(New); + } + + /// removeBlock - This method completely removes BB from all data structures, + /// including all of the Loop objects it is nested in and our mapping from + /// BasicBlocks to loops. + void removeBlock(BlockT *BB) { + typename std::map<BlockT *, LoopBase<BlockT>*>::iterator I = BBMap.find(BB); + if (I != BBMap.end()) { + for (LoopBase<BlockT> *L = I->second; L; L = L->getParentLoop()) + L->removeBlockFromLoop(BB); + + BBMap.erase(I); + } + } + + // Internals + + static bool isNotAlreadyContainedIn(const LoopBase<BlockT> *SubLoop, + const LoopBase<BlockT> *ParentLoop) { + if (SubLoop == 0) return true; + if (SubLoop == ParentLoop) return false; + return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop); + } + + void Calculate(DominatorTreeBase<BlockT> &DT) { + BlockT *RootNode = DT.getRootNode()->getBlock(); + + for (df_iterator<BlockT*> NI = df_begin(RootNode), + NE = df_end(RootNode); NI != NE; ++NI) + if (LoopBase<BlockT> *L = ConsiderForLoop(*NI, DT)) + TopLevelLoops.push_back(L); + } + + LoopBase<BlockT> *ConsiderForLoop(BlockT *BB, DominatorTreeBase<BlockT> &DT) { + if (BBMap.find(BB) != BBMap.end()) return 0;// Haven't processed this node? + + std::vector<BlockT *> TodoStack; + + // Scan the predecessors of BB, checking to see if BB dominates any of + // them. This identifies backedges which target this node... + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + for (typename InvBlockTraits::ChildIteratorType I = + InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB); + I != E; ++I) + if (DT.dominates(BB, *I)) // If BB dominates it's predecessor... 
+ TodoStack.push_back(*I); + + if (TodoStack.empty()) return 0; // No backedges to this block... + + // Create a new loop to represent this basic block... + LoopBase<BlockT> *L = new LoopBase<BlockT>(BB); + BBMap[BB] = L; + + BlockT *EntryBlock = BB->getParent()->begin(); + + while (!TodoStack.empty()) { // Process all the nodes in the loop + BlockT *X = TodoStack.back(); + TodoStack.pop_back(); + + if (!L->contains(X) && // As of yet unprocessed?? + DT.dominates(EntryBlock, X)) { // X is reachable from entry block? + // Check to see if this block already belongs to a loop. If this occurs + // then we have a case where a loop that is supposed to be a child of + // the current loop was processed before the current loop. When this + // occurs, this child loop gets added to a part of the current loop, + // making it a sibling to the current loop. We have to reparent this + // loop. + if (LoopBase<BlockT> *SubLoop = + const_cast<LoopBase<BlockT>*>(getLoopFor(X))) + if (SubLoop->getHeader() == X && isNotAlreadyContainedIn(SubLoop, L)){ + // Remove the subloop from it's current parent... + assert(SubLoop->ParentLoop && SubLoop->ParentLoop != L); + LoopBase<BlockT> *SLP = SubLoop->ParentLoop; // SubLoopParent + typename std::vector<LoopBase<BlockT>*>::iterator I = + std::find(SLP->SubLoops.begin(), SLP->SubLoops.end(), SubLoop); + assert(I != SLP->SubLoops.end() &&"SubLoop not a child of parent?"); + SLP->SubLoops.erase(I); // Remove from parent... + + // Add the subloop to THIS loop... + SubLoop->ParentLoop = L; + L->SubLoops.push_back(SubLoop); + } + + // Normal case, add the block to our loop... + L->Blocks.push_back(X); + + typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits; + + // Add all of the predecessors of X to the end of the work stack... + TodoStack.insert(TodoStack.end(), InvBlockTraits::child_begin(X), + InvBlockTraits::child_end(X)); + } + } + + // If there are any loops nested within this loop, create them now! + for (typename std::vector<BlockT*>::iterator I = L->Blocks.begin(), + E = L->Blocks.end(); I != E; ++I) + if (LoopBase<BlockT> *NewLoop = ConsiderForLoop(*I, DT)) { + L->SubLoops.push_back(NewLoop); + NewLoop->ParentLoop = L; + } + + // Add the basic blocks that comprise this loop to the BBMap so that this + // loop can be found for them. + // + for (typename std::vector<BlockT*>::iterator I = L->Blocks.begin(), + E = L->Blocks.end(); I != E; ++I) { + typename std::map<BlockT*, LoopBase<BlockT>*>::iterator BBMI = + BBMap.find(*I); + if (BBMI == BBMap.end()) // Not in map yet... + BBMap.insert(BBMI, std::make_pair(*I, L)); // Must be at this level + } + + // Now that we have a list of all of the child loops of this loop, check to + // see if any of them should actually be nested inside of each other. We + // can accidentally pull loops our of their parents, so we must make sure to + // organize the loop nests correctly now. + { + std::map<BlockT*, LoopBase<BlockT>*> ContainingLoops; + for (unsigned i = 0; i != L->SubLoops.size(); ++i) { + LoopBase<BlockT> *Child = L->SubLoops[i]; + assert(Child->getParentLoop() == L && "Not proper child loop?"); + + if (LoopBase<BlockT> *ContainingLoop = + ContainingLoops[Child->getHeader()]) { + // If there is already a loop which contains this loop, move this loop + // into the containing loop. + MoveSiblingLoopInto(Child, ContainingLoop); + --i; // The loop got removed from the SubLoops list. + } else { + // This is currently considered to be a top-level loop. 
Check to see + // if any of the contained blocks are loop headers for subloops we + // have already processed. + for (unsigned b = 0, e = Child->Blocks.size(); b != e; ++b) { + LoopBase<BlockT> *&BlockLoop = ContainingLoops[Child->Blocks[b]]; + if (BlockLoop == 0) { // Child block not processed yet... + BlockLoop = Child; + } else if (BlockLoop != Child) { + LoopBase<BlockT> *SubLoop = BlockLoop; + // Reparent all of the blocks which used to belong to BlockLoops + for (unsigned j = 0, e = SubLoop->Blocks.size(); j != e; ++j) + ContainingLoops[SubLoop->Blocks[j]] = Child; + + // There is already a loop which contains this block, that means + // that we should reparent the loop which the block is currently + // considered to belong to to be a child of this loop. + MoveSiblingLoopInto(SubLoop, Child); + --i; // We just shrunk the SubLoops list. + } + } + } + } + } + + return L; + } + + /// MoveSiblingLoopInto - This method moves the NewChild loop to live inside + /// of the NewParent Loop, instead of being a sibling of it. + void MoveSiblingLoopInto(LoopBase<BlockT> *NewChild, + LoopBase<BlockT> *NewParent) { + LoopBase<BlockT> *OldParent = NewChild->getParentLoop(); + assert(OldParent && OldParent == NewParent->getParentLoop() && + NewChild != NewParent && "Not sibling loops!"); + + // Remove NewChild from being a child of OldParent + typename std::vector<LoopBase<BlockT>*>::iterator I = + std::find(OldParent->SubLoops.begin(), OldParent->SubLoops.end(), + NewChild); + assert(I != OldParent->SubLoops.end() && "Parent fields incorrect??"); + OldParent->SubLoops.erase(I); // Remove from parent's subloops list + NewChild->ParentLoop = 0; + + InsertLoopInto(NewChild, NewParent); + } + + /// InsertLoopInto - This inserts loop L into the specified parent loop. If + /// the parent loop contains a loop which should contain L, the loop gets + /// inserted into L instead. + void InsertLoopInto(LoopBase<BlockT> *L, LoopBase<BlockT> *Parent) { + BlockT *LHeader = L->getHeader(); + assert(Parent->contains(LHeader) && + "This loop should not be inserted here!"); + + // Check to see if it belongs in a child loop... + for (unsigned i = 0, e = static_cast<unsigned>(Parent->SubLoops.size()); + i != e; ++i) + if (Parent->SubLoops[i]->contains(LHeader)) { + InsertLoopInto(L, Parent->SubLoops[i]); + return; + } + + // If not, insert it here! + Parent->SubLoops.push_back(L); + L->ParentLoop = Parent; + } + + // Debugging + + void print(std::ostream &OS, const Module* ) const { + for (unsigned i = 0; i < TopLevelLoops.size(); ++i) + TopLevelLoops[i]->print(OS); + #if 0 + for (std::map<BasicBlock*, Loop*>::const_iterator I = BBMap.begin(), + E = BBMap.end(); I != E; ++I) + OS << "BB '" << I->first->getName() << "' level = " + << I->second->getLoopDepth() << "\n"; + #endif + } +}; + +class LoopInfo : public FunctionPass { + LoopInfoBase<BasicBlock>* LI; + friend class LoopBase<BasicBlock>; + +public: + static char ID; // Pass identification, replacement for typeid + + LoopInfo() : FunctionPass(&ID) { + LI = new LoopInfoBase<BasicBlock>(); + } + + ~LoopInfo() { delete LI; } + + LoopInfoBase<BasicBlock>& getBase() { return *LI; } + + /// iterator/begin/end - The interface to the top-level loops in the current + /// function. + /// + typedef std::vector<Loop*>::const_iterator iterator; + inline iterator begin() const { return LI->begin(); } + inline iterator end() const { return LI->end(); } + bool empty() const { return LI->empty(); } + + /// getLoopFor - Return the inner most loop that BB lives in. 
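For context, a client of this analysis normally requests it through the pass manager rather than calling Calculate directly. The following sketch is illustrative only: the pass name LoopDepthPrinter is invented, pass registration is omitted, and it simply combines getAnalysis<LoopInfo>, getLoopFor and the per-loop depth query shown earlier in this header.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/Streams.h"

using namespace llvm;

namespace {
  // Illustrative sketch only: a throwaway FunctionPass that consumes LoopInfo
  // and prints the loop depth of every block that lives inside a loop.
  struct LoopDepthPrinter : public FunctionPass {
    static char ID;
    LoopDepthPrinter() : FunctionPass(&ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LoopInfo>();  // Run LoopInfo before this pass.
      AU.setPreservesAll();        // We only read the IR.
    }

    virtual bool runOnFunction(Function &F) {
      LoopInfo &LI = getAnalysis<LoopInfo>();
      for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
        if (Loop *L = LI.getLoopFor(&*BB))
          cerr << BB->getName() << " sits at loop depth "
               << L->getLoopDepth() << "\n";
      return false;                // The IR is left unchanged.
    }
  };
  char LoopDepthPrinter::ID = 0;
}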
If a basic + /// block is in no loop (for example the entry node), null is returned. + /// + inline Loop *getLoopFor(const BasicBlock *BB) const { + return LI->getLoopFor(BB); + } + + /// operator[] - same as getLoopFor... + /// + inline const Loop *operator[](const BasicBlock *BB) const { + return LI->getLoopFor(BB); + } + + /// getLoopDepth - Return the loop nesting level of the specified block. A + /// depth of 0 means the block is not inside any loop. + /// + inline unsigned getLoopDepth(const BasicBlock *BB) const { + return LI->getLoopDepth(BB); + } + + // isLoopHeader - True if the block is a loop header node + inline bool isLoopHeader(BasicBlock *BB) const { + return LI->isLoopHeader(BB); + } + + /// runOnFunction - Calculate the natural loop information. + /// + virtual bool runOnFunction(Function &F); + + virtual void releaseMemory() { LI->releaseMemory(); } + + virtual void print(std::ostream &O, const Module* M = 0) const { + if (O) LI->print(O, M); + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + /// removeLoop - This removes the specified top-level loop from this loop info + /// object. The loop is not deleted, as it will presumably be inserted into + /// another loop. + inline Loop *removeLoop(iterator I) { return LI->removeLoop(I); } + + /// changeLoopFor - Change the top-level loop that contains BB to the + /// specified loop. This should be used by transformations that restructure + /// the loop hierarchy tree. + inline void changeLoopFor(BasicBlock *BB, Loop *L) { + LI->changeLoopFor(BB, L); + } + + /// changeTopLevelLoop - Replace the specified loop in the top-level loops + /// list with the indicated loop. + inline void changeTopLevelLoop(Loop *OldLoop, Loop *NewLoop) { + LI->changeTopLevelLoop(OldLoop, NewLoop); + } + + /// addTopLevelLoop - This adds the specified loop to the collection of + /// top-level loops. + inline void addTopLevelLoop(Loop *New) { + LI->addTopLevelLoop(New); + } + + /// removeBlock - This method completely removes BB from all data structures, + /// including all of the Loop objects it is nested in and our mapping from + /// BasicBlocks to loops. + void removeBlock(BasicBlock *BB) { + LI->removeBlock(BB); + } +}; + + +// Allow clients to walk the list of nested loops... +template <> struct GraphTraits<const Loop*> { + typedef const Loop NodeType; + typedef std::vector<Loop*>::const_iterator ChildIteratorType; + + static NodeType *getEntryNode(const Loop *L) { return L; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->end(); + } +}; + +template <> struct GraphTraits<Loop*> { + typedef Loop NodeType; + typedef std::vector<Loop*>::const_iterator ChildIteratorType; + + static NodeType *getEntryNode(Loop *L) { return L; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->end(); + } +}; + +template<class BlockT> +void LoopBase<BlockT>::addBasicBlockToLoop(BlockT *NewBB, + LoopInfoBase<BlockT> &LIB) { + assert((Blocks.empty() || LIB[getHeader()] == this) && + "Incorrect LI specified for this loop!"); + assert(NewBB && "Cannot add a null basic block to the loop!"); + assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!"); + + // Add the loop mapping to the LoopInfo object... + LIB.BBMap[NewBB] = this; + + // Add the basic block to this loop and all parent loops... 
+ LoopBase<BlockT> *L = this; + while (L) { + L->Blocks.push_back(NewBB); + L = L->getParentLoop(); + } +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/LoopPass.h b/include/llvm/Analysis/LoopPass.h new file mode 100644 index 0000000..ca41e51 --- /dev/null +++ b/include/llvm/Analysis/LoopPass.h @@ -0,0 +1,150 @@ +//===- LoopPass.h - LoopPass class ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines LoopPass class. All loop optimization +// and transformation passes are derived from LoopPass. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LOOP_PASS_H +#define LLVM_LOOP_PASS_H + +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Pass.h" +#include "llvm/PassManagers.h" +#include "llvm/Function.h" + +namespace llvm { + +class LPPassManager; +class Function; +class PMStack; + +class LoopPass : public Pass { +public: + explicit LoopPass(intptr_t pid) : Pass(pid) {} + explicit LoopPass(void *pid) : Pass(pid) {} + + // runOnLoop - This method should be implemented by the subclass to perform + // whatever action is necessary for the specified Loop. + virtual bool runOnLoop(Loop *L, LPPassManager &LPM) = 0; + virtual bool runOnFunctionBody(Function &F, LPPassManager &LPM) { + return false; + } + + // Initialization and finalization hooks. + virtual bool doInitialization(Loop *L, LPPassManager &LPM) { + return false; + } + + // Finalization hook does not supply Loop because at this time + // loop nest is completely different. + virtual bool doFinalization() { return false; } + + // Check if this pass is suitable for the current LPPassManager, if + // available. This pass P is not suitable for a LPPassManager if P + // is not preserving higher level analysis info used by other + // LPPassManager passes. In such case, pop LPPassManager from the + // stack. This will force assignPassManager() to create new + // LPPassManger as expected. + void preparePassManager(PMStack &PMS); + + /// Assign pass manager to manager this pass + virtual void assignPassManager(PMStack &PMS, + PassManagerType PMT = PMT_LoopPassManager); + + /// Return what kind of Pass Manager can manage this pass. + virtual PassManagerType getPotentialPassManagerType() const { + return PMT_LoopPassManager; + } + + //===--------------------------------------------------------------------===// + /// SimpleAnalysis - Provides simple interface to update analysis info + /// maintained by various passes. Note, if required this interface can + /// be extracted into a separate abstract class but it would require + /// additional use of multiple inheritance in Pass class hierarchy, something + /// we are trying to avoid. + + /// Each loop pass can override these simple analysis hooks to update + /// desired analysis information. + /// cloneBasicBlockAnalysis - Clone analysis info associated with basic block. + virtual void cloneBasicBlockAnalysis(BasicBlock *F, BasicBlock *T, Loop *L) {} + + /// deletekAnalysisValue - Delete analysis info associated with value V. 
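A minimal sketch of a client of this interface, for orientation only: it assumes the declarations above, uses the invented name NoopLoopPass, and omits pass registration. Real loop passes implement runOnLoop and may also override the initialization, finalization and simple-analysis hooks described above.

#include "llvm/Analysis/LoopPass.h"
#include "llvm/Support/Streams.h"

using namespace llvm;

namespace {
  // Illustrative sketch only: the minimal shape of a transformation built on
  // the LoopPass interface declared above. It does no real work.
  struct NoopLoopPass : public LoopPass {
    static char ID;
    NoopLoopPass() : LoopPass(&ID) {}

    // Invoked by the LPPassManager for each loop in the current function.
    virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
      cerr << "visiting a loop at depth " << L->getLoopDepth() << "\n";
      return false;              // The loop was not modified.
    }

    // Nothing is required or invalidated by this pass.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesAll();
    }
  };
  char NoopLoopPass::ID = 0;
}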
+ virtual void deleteAnalysisValue(Value *V, Loop *L) {} +}; + +class LPPassManager : public FunctionPass, public PMDataManager { +public: + static char ID; + explicit LPPassManager(int Depth); + + /// run - Execute all of the passes scheduled for execution. Keep track of + /// whether any of the passes modifies the module, and if so, return true. + bool runOnFunction(Function &F); + + /// Pass Manager itself does not invalidate any analysis info. + // LPPassManager needs LoopInfo. + void getAnalysisUsage(AnalysisUsage &Info) const; + + virtual const char *getPassName() const { + return "Loop Pass Manager"; + } + + /// Print passes managed by this manager + void dumpPassStructure(unsigned Offset); + + Pass *getContainedPass(unsigned N) { + assert(N < PassVector.size() && "Pass number out of range!"); + Pass *FP = static_cast<Pass *>(PassVector[N]); + return FP; + } + + virtual PassManagerType getPassManagerType() const { + return PMT_LoopPassManager; + } + +public: + // Delete loop from the loop queue and loop nest (LoopInfo). + void deleteLoopFromQueue(Loop *L); + + // Insert loop into the loop nest(LoopInfo) and loop queue(LQ). + void insertLoop(Loop *L, Loop *ParentLoop); + + // Reoptimize this loop. LPPassManager will re-insert this loop into the + // queue. This allows LoopPass to change loop nest for the loop. This + // utility may send LPPassManager into infinite loops so use caution. + void redoLoop(Loop *L); + + //===--------------------------------------------------------------------===// + /// SimpleAnalysis - Provides simple interface to update analysis info + /// maintained by various passes. Note, if required this interface can + /// be extracted into a separate abstract class but it would require + /// additional use of multiple inheritance in Pass class hierarchy, something + /// we are trying to avoid. + + /// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for + /// all passes that implement simple analysis interface. + void cloneBasicBlockSimpleAnalysis(BasicBlock *From, BasicBlock *To, Loop *L); + + /// deleteSimpleAnalysisValue - Invoke deleteAnalysisValue hook for all passes + /// that implement simple analysis interface. + void deleteSimpleAnalysisValue(Value *V, Loop *L); + +private: + std::deque<Loop *> LQ; + bool skipThisLoop; + bool redoThisLoop; + LoopInfo *LI; + Loop *CurrentLoop; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/LoopVR.h b/include/llvm/Analysis/LoopVR.h new file mode 100644 index 0000000..1d806f8 --- /dev/null +++ b/include/llvm/Analysis/LoopVR.h @@ -0,0 +1,90 @@ +//===- LoopVR.cpp - Value Range analysis driven by loop information -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface for the loop-driven value range pass. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_LOOPVR_H +#define LLVM_ANALYSIS_LOOPVR_H + +#include "llvm/Pass.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Support/ConstantRange.h" +#include <iosfwd> +#include <map> + +namespace llvm { + +/// LoopVR - This class maintains a mapping of Values to ConstantRanges. +/// There are interfaces to look up and update ranges by value, and for +/// accessing all values with range information. 
+/// +class LoopVR : public FunctionPass { +public: + static char ID; // Class identification, replacement for typeinfo + + LoopVR() : FunctionPass(&ID) {} + + bool runOnFunction(Function &F); + virtual void print(std::ostream &os, const Module *) const; + void releaseMemory(); + + void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequiredTransitive<LoopInfo>(); + AU.addRequiredTransitive<ScalarEvolution>(); + AU.setPreservesAll(); + } + + //===--------------------------------------------------------------------- + // Methods that are used to look up and update particular values. + + /// get - return the ConstantRange for a given Value of IntegerType. + ConstantRange get(Value *V); + + /// remove - remove a value from this analysis. + void remove(Value *V); + + /// narrow - improve our unterstanding of a Value by pointing out that it + /// must fall within ConstantRange. To replace a range, remove it first. + void narrow(Value *V, const ConstantRange &CR); + + //===--------------------------------------------------------------------- + // Methods that are used to iterate across all values with information. + + /// size - returns the number of Values with information + unsigned size() const { return Map.size(); } + + typedef std::map<Value *, ConstantRange *>::iterator iterator; + + /// begin - return an iterator to the first Value, ConstantRange pair + iterator begin() { return Map.begin(); } + + /// end - return an iterator one past the last Value, ConstantRange pair + iterator end() { return Map.end(); } + + /// getValue - return the Value referenced by an iterator + Value *getValue(iterator I) { return I->first; } + + /// getConstantRange - return the ConstantRange referenced by an iterator + ConstantRange getConstantRange(iterator I) { return *I->second; } + +private: + ConstantRange compute(Value *V); + + ConstantRange getRange(SCEVHandle S, Loop *L, ScalarEvolution &SE); + + ConstantRange getRange(SCEVHandle S, SCEVHandle T, ScalarEvolution &SE); + + std::map<Value *, ConstantRange *> Map; +}; + +} // end llvm namespace + +#endif diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h new file mode 100644 index 0000000..d7d795e --- /dev/null +++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h @@ -0,0 +1,287 @@ +//===- llvm/Analysis/MemoryDependenceAnalysis.h - Memory Deps --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MemoryDependenceAnalysis analysis pass. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_MEMORY_DEPENDENCE_H +#define LLVM_ANALYSIS_MEMORY_DEPENDENCE_H + +#include "llvm/BasicBlock.h" +#include "llvm/Pass.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/ADT/PointerIntPair.h" + +namespace llvm { + class Function; + class FunctionPass; + class Instruction; + class CallSite; + class AliasAnalysis; + class TargetData; + class MemoryDependenceAnalysis; + class PredIteratorCache; + + /// MemDepResult - A memory dependence query can return one of three different + /// answers, described below. + class MemDepResult { + enum DepType { + /// Invalid - Clients of MemDep never see this. 
+ Invalid = 0, + + /// Clobber - This is a dependence on the specified instruction which + /// clobbers the desired value. The pointer member of the MemDepResult + /// pair holds the instruction that clobbers the memory. For example, + /// this occurs when we see a may-aliased store to the memory location we + /// care about. + Clobber, + + /// Def - This is a dependence on the specified instruction which + /// defines/produces the desired memory location. The pointer member of + /// the MemDepResult pair holds the instruction that defines the memory. + /// Cases of interest: + /// 1. This could be a load or store for dependence queries on + /// load/store. The value loaded or stored is the produced value. + /// Note that the pointer operand may be different than that of the + /// queried pointer due to must aliases and phi translation. Note + /// that the def may not be the same type as the query, the pointers + /// may just be must aliases. + /// 2. For loads and stores, this could be an allocation instruction. In + /// this case, the load is loading an undef value or a store is the + /// first store to (that part of) the allocation. + /// 3. Dependence queries on calls return Def only when they are + /// readonly calls with identical callees and no intervening + /// clobbers. No validation is done that the operands to the calls + /// are the same. + Def, + + /// NonLocal - This marker indicates that the query has no dependency in + /// the specified block. To find out more, the client should query other + /// predecessor blocks. + NonLocal + }; + typedef PointerIntPair<Instruction*, 2, DepType> PairTy; + PairTy Value; + explicit MemDepResult(PairTy V) : Value(V) {} + public: + MemDepResult() : Value(0, Invalid) {} + + /// get methods: These are static ctor methods for creating various + /// MemDepResult kinds. + static MemDepResult getDef(Instruction *Inst) { + return MemDepResult(PairTy(Inst, Def)); + } + static MemDepResult getClobber(Instruction *Inst) { + return MemDepResult(PairTy(Inst, Clobber)); + } + static MemDepResult getNonLocal() { + return MemDepResult(PairTy(0, NonLocal)); + } + + /// isClobber - Return true if this MemDepResult represents a query that is + /// a instruction clobber dependency. + bool isClobber() const { return Value.getInt() == Clobber; } + + /// isDef - Return true if this MemDepResult represents a query that is + /// a instruction definition dependency. + bool isDef() const { return Value.getInt() == Def; } + + /// isNonLocal - Return true if this MemDepResult represents an query that + /// is transparent to the start of the block, but where a non-local hasn't + /// been done. + bool isNonLocal() const { return Value.getInt() == NonLocal; } + + /// getInst() - If this is a normal dependency, return the instruction that + /// is depended on. Otherwise, return null. + Instruction *getInst() const { return Value.getPointer(); } + + bool operator==(const MemDepResult &M) const { return Value == M.Value; } + bool operator!=(const MemDepResult &M) const { return Value != M.Value; } + bool operator<(const MemDepResult &M) const { return Value < M.Value; } + bool operator>(const MemDepResult &M) const { return Value > M.Value; } + private: + friend class MemoryDependenceAnalysis; + /// Dirty - Entries with this marker occur in a LocalDeps map or + /// NonLocalDeps map when the instruction they previously referenced was + /// removed from MemDep. In either case, the entry may include an + /// instruction pointer. 
If so, the pointer is an instruction in the + /// block where scanning can start from, saving some work. + /// + /// In a default-constructed MemDepResult object, the type will be Dirty + /// and the instruction pointer will be null. + /// + + /// isDirty - Return true if this is a MemDepResult in its dirty/invalid. + /// state. + bool isDirty() const { return Value.getInt() == Invalid; } + + static MemDepResult getDirty(Instruction *Inst) { + return MemDepResult(PairTy(Inst, Invalid)); + } + }; + + /// MemoryDependenceAnalysis - This is an analysis that determines, for a + /// given memory operation, what preceding memory operations it depends on. + /// It builds on alias analysis information, and tries to provide a lazy, + /// caching interface to a common kind of alias information query. + /// + /// The dependency information returned is somewhat unusual, but is pragmatic. + /// If queried about a store or call that might modify memory, the analysis + /// will return the instruction[s] that may either load from that memory or + /// store to it. If queried with a load or call that can never modify memory, + /// the analysis will return calls and stores that might modify the pointer, + /// but generally does not return loads unless a) they are volatile, or + /// b) they load from *must-aliased* pointers. Returning a dependence on + /// must-alias'd pointers instead of all pointers interacts well with the + /// internal caching mechanism. + /// + class MemoryDependenceAnalysis : public FunctionPass { + // A map from instructions to their dependency. + typedef DenseMap<Instruction*, MemDepResult> LocalDepMapType; + LocalDepMapType LocalDeps; + + public: + typedef std::pair<BasicBlock*, MemDepResult> NonLocalDepEntry; + typedef std::vector<NonLocalDepEntry> NonLocalDepInfo; + private: + /// ValueIsLoadPair - This is a pair<Value*, bool> where the bool is true if + /// the dependence is a read only dependence, false if read/write. + typedef PointerIntPair<Value*, 1, bool> ValueIsLoadPair; + + /// BBSkipFirstBlockPair - This pair is used when caching information for a + /// block. If the pointer is null, the cache value is not a full query that + /// starts at the specified block. If non-null, the bool indicates whether + /// or not the contents of the block was skipped. + typedef PointerIntPair<BasicBlock*, 1, bool> BBSkipFirstBlockPair; + + /// CachedNonLocalPointerInfo - This map stores the cached results of doing + /// a pointer lookup at the bottom of a block. The key of this map is the + /// pointer+isload bit, the value is a list of <bb->result> mappings. + typedef DenseMap<ValueIsLoadPair, std::pair<BBSkipFirstBlockPair, + NonLocalDepInfo> > CachedNonLocalPointerInfo; + CachedNonLocalPointerInfo NonLocalPointerDeps; + + // A map from instructions to their non-local pointer dependencies. + typedef DenseMap<Instruction*, + SmallPtrSet<ValueIsLoadPair, 4> > ReverseNonLocalPtrDepTy; + ReverseNonLocalPtrDepTy ReverseNonLocalPtrDeps; + + + /// PerInstNLInfo - This is the instruction we keep for each cached access + /// that we have for an instruction. The pointer is an owning pointer and + /// the bool indicates whether we have any dirty bits in the set. + typedef std::pair<NonLocalDepInfo, bool> PerInstNLInfo; + + // A map from instructions to their non-local dependencies. + typedef DenseMap<Instruction*, PerInstNLInfo> NonLocalDepMapType; + + NonLocalDepMapType NonLocalDeps; + + // A reverse mapping from dependencies to the dependees. 
This is + // used when removing instructions to keep the cache coherent. + typedef DenseMap<Instruction*, + SmallPtrSet<Instruction*, 4> > ReverseDepMapType; + ReverseDepMapType ReverseLocalDeps; + + // A reverse mapping form dependencies to the non-local dependees. + ReverseDepMapType ReverseNonLocalDeps; + + /// Current AA implementation, just a cache. + AliasAnalysis *AA; + TargetData *TD; + OwningPtr<PredIteratorCache> PredCache; + public: + MemoryDependenceAnalysis(); + ~MemoryDependenceAnalysis(); + static char ID; + + /// Pass Implementation stuff. This doesn't do any analysis eagerly. + bool runOnFunction(Function &); + + /// Clean up memory in between runs + void releaseMemory(); + + /// getAnalysisUsage - Does not modify anything. It uses Value Numbering + /// and Alias Analysis. + /// + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + /// getDependency - Return the instruction on which a memory operation + /// depends. See the class comment for more details. It is illegal to call + /// this on non-memory instructions. + MemDepResult getDependency(Instruction *QueryInst); + + /// getNonLocalCallDependency - Perform a full dependency query for the + /// specified call, returning the set of blocks that the value is + /// potentially live across. The returned set of results will include a + /// "NonLocal" result for all blocks where the value is live across. + /// + /// This method assumes the instruction returns a "NonLocal" dependency + /// within its own block. + /// + /// This returns a reference to an internal data structure that may be + /// invalidated on the next non-local query or when an instruction is + /// removed. Clients must copy this data if they want it around longer than + /// that. + const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS); + + + /// getNonLocalPointerDependency - Perform a full dependency query for an + /// access to the specified (non-volatile) memory location, returning the + /// set of instructions that either define or clobber the value. + /// + /// This method assumes the pointer has a "NonLocal" dependency within BB. + void getNonLocalPointerDependency(Value *Pointer, bool isLoad, + BasicBlock *BB, + SmallVectorImpl<NonLocalDepEntry> &Result); + + /// removeInstruction - Remove an instruction from the dependence analysis, + /// updating the dependence of instructions that previously depended on it. + void removeInstruction(Instruction *InstToRemove); + + /// invalidateCachedPointerInfo - This method is used to invalidate cached + /// information about the specified pointer, because it may be too + /// conservative in memdep. This is an optional call that can be used when + /// the client detects an equivalence between the pointer and some other + /// value and replaces the other value with ptr. This can make Ptr available + /// in more places that cached info does not necessarily keep. 
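To make the query interface above concrete, here is an illustrative consumer (the pass name LoadDepPrinter is invented and registration is omitted): it asks for the local dependence of every load and distinguishes the Def, Clobber and NonLocal answers described in MemDepResult.

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Streams.h"

using namespace llvm;

namespace {
  // Illustrative sketch only: print what each load instruction locally
  // depends on, using the MemDepResult kinds declared above.
  struct LoadDepPrinter : public FunctionPass {
    static char ID;
    LoadDepPrinter() : FunctionPass(&ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.setPreservesAll();
    }

    virtual bool runOnFunction(Function &F) {
      MemoryDependenceAnalysis &MDA = getAnalysis<MemoryDependenceAnalysis>();
      for (Function::iterator BB = F.begin(), FE = F.end(); BB != FE; ++BB)
        for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
          if (isa<LoadInst>(&*I)) {
            MemDepResult Dep = MDA.getDependency(&*I);
            if (Dep.isDef())
              cerr << "  defined by a " << Dep.getInst()->getOpcodeName()
                   << " in the same block\n";
            else if (Dep.isClobber())
              cerr << "  clobbered by a " << Dep.getInst()->getOpcodeName()
                   << " in the same block\n";
            else // NonLocal: no dependency before the start of this block.
              cerr << "  no local dependency; ask the predecessor blocks\n";
          }
      return false;
    }
  };
  char LoadDepPrinter::ID = 0;
}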
+ void invalidateCachedPointerInfo(Value *Ptr); + + private: + MemDepResult getPointerDependencyFrom(Value *Pointer, uint64_t MemSize, + bool isLoad, + BasicBlock::iterator ScanIt, + BasicBlock *BB); + MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall, + BasicBlock::iterator ScanIt, + BasicBlock *BB); + bool getNonLocalPointerDepFromBB(Value *Pointer, uint64_t Size, + bool isLoad, BasicBlock *BB, + SmallVectorImpl<NonLocalDepEntry> &Result, + DenseMap<BasicBlock*, Value*> &Visited, + bool SkipFirstBlock = false); + MemDepResult GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize, + bool isLoad, BasicBlock *BB, + NonLocalDepInfo *Cache, + unsigned NumSortedEntries); + + void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P); + + /// verifyRemoved - Verify that the specified instruction does not occur + /// in our internal data structures. + void verifyRemoved(Instruction *Inst) const; + + }; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/Passes.h b/include/llvm/Analysis/Passes.h new file mode 100644 index 0000000..d9121a8 --- /dev/null +++ b/include/llvm/Analysis/Passes.h @@ -0,0 +1,128 @@ +//===-- llvm/Analysis/Passes.h - Constructors for analyses ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes for accessor functions that expose passes +// in the analysis libraries. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_PASSES_H +#define LLVM_ANALYSIS_PASSES_H + +namespace llvm { + class FunctionPass; + class ImmutablePass; + class ModulePass; + class Pass; + class LibCallInfo; + + //===--------------------------------------------------------------------===// + // + // createGlobalsModRefPass - This pass provides alias and mod/ref info for + // global values that do not have their addresses taken. + // + Pass *createGlobalsModRefPass(); + + //===--------------------------------------------------------------------===// + // + // createAliasDebugger - This pass helps debug clients of AA + // + Pass *createAliasDebugger(); + + //===--------------------------------------------------------------------===// + // + // createAliasAnalysisCounterPass - This pass counts alias queries and how the + // alias analysis implementation responds. + // + ModulePass *createAliasAnalysisCounterPass(); + + //===--------------------------------------------------------------------===// + // + // createAAEvalPass - This pass implements a simple N^2 alias analysis + // accuracy evaluator. + // + FunctionPass *createAAEvalPass(); + + //===--------------------------------------------------------------------===// + // + // createNoAAPass - This pass implements a "I don't know" alias analysis. + // + ImmutablePass *createNoAAPass(); + + //===--------------------------------------------------------------------===// + // + // createBasicAliasAnalysisPass - This pass implements the default alias + // analysis. + // + ImmutablePass *createBasicAliasAnalysisPass(); + + //===--------------------------------------------------------------------===// + // + /// createLibCallAliasAnalysisPass - Create an alias analysis pass that knows + /// about the semantics of a set of libcalls specified by LCI. 
The newly + /// constructed pass takes ownership of the pointer that is provided. + /// + FunctionPass *createLibCallAliasAnalysisPass(LibCallInfo *LCI); + + //===--------------------------------------------------------------------===// + // + // createAndersensPass - This pass implements Andersen's interprocedural alias + // analysis. + // + ModulePass *createAndersensPass(); + + //===--------------------------------------------------------------------===// + // + // createProfileLoaderPass - This pass loads information from a profile dump + // file. + // + ModulePass *createProfileLoaderPass(); + + //===--------------------------------------------------------------------===// + // + // createNoProfileInfoPass - This pass implements the default "no profile". + // + ImmutablePass *createNoProfileInfoPass(); + + //===--------------------------------------------------------------------===// + // + // createDSAAPass - This pass implements simple context sensitive alias + // analysis. + // + ModulePass *createDSAAPass(); + + //===--------------------------------------------------------------------===// + // + // createDSOptPass - This pass uses DSA to do a series of simple + // optimizations. + // + ModulePass *createDSOptPass(); + + //===--------------------------------------------------------------------===// + // + // createSteensgaardPass - This pass uses the data structure graphs to do a + // simple context insensitive alias analysis. + // + ModulePass *createSteensgaardPass(); + + //===--------------------------------------------------------------------===// + // + // createLiveValuesPass - This creates an instance of the LiveValues pass. + // + FunctionPass *createLiveValuesPass(); + + // Minor pass prototypes, allowing us to expose them through bugpoint and + // analyze. + FunctionPass *createInstCountPass(); + + // print debug info intrinsics in human readable form + FunctionPass *createDbgInfoPrinterPass(); +} + +#endif diff --git a/include/llvm/Analysis/PostDominators.h b/include/llvm/Analysis/PostDominators.h new file mode 100644 index 0000000..cd6af74 --- /dev/null +++ b/include/llvm/Analysis/PostDominators.h @@ -0,0 +1,98 @@ +//=- llvm/Analysis/PostDominators.h - Post Dominator Calculation-*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes interfaces to post dominance information. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_POST_DOMINATORS_H +#define LLVM_ANALYSIS_POST_DOMINATORS_H + +#include "llvm/Analysis/Dominators.h" + +namespace llvm { + +/// PostDominatorTree Class - Concrete subclass of DominatorTree that is used to +/// compute the a post-dominator tree. 
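For orientation, the creation functions above are normally handed straight to a pass manager. The sketch below is illustrative only: it assumes the standard PassManager from llvm/PassManager.h, a Module obtained elsewhere (for instance from the bitcode reader), and the invented helper name runAliasAnalysisEvaluation.

#include "llvm/Analysis/Passes.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"

using namespace llvm;

// Illustrative sketch only: schedule a few of the analyses declared above.
// The AA evaluator then reports how precise the preceding alias analyses are.
static void runAliasAnalysisEvaluation(Module &M) {
  PassManager PM;
  PM.add(createBasicAliasAnalysisPass());  // Default alias analysis.
  PM.add(createGlobalsModRefPass());       // Mod/ref info for internal globals.
  PM.add(createAAEvalPass());              // N^2 alias-query accuracy evaluator.
  PM.run(M);
}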
+/// +struct PostDominatorTree : public FunctionPass { + static char ID; // Pass identification, replacement for typeid + DominatorTreeBase<BasicBlock>* DT; + + PostDominatorTree() : FunctionPass(&ID) { + DT = new DominatorTreeBase<BasicBlock>(true); + } + + ~PostDominatorTree(); + + virtual bool runOnFunction(Function &F); + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + + inline const std::vector<BasicBlock*> &getRoots() const { + return DT->getRoots(); + } + + inline DomTreeNode *getRootNode() const { + return DT->getRootNode(); + } + + inline DomTreeNode *operator[](BasicBlock *BB) const { + return DT->getNode(BB); + } + + inline bool properlyDominates(const DomTreeNode* A, DomTreeNode* B) const { + return DT->properlyDominates(A, B); + } + + inline bool properlyDominates(BasicBlock* A, BasicBlock* B) const { + return DT->properlyDominates(A, B); + } + + virtual void print(std::ostream &OS, const Module* M= 0) const { + DT->print(OS, M); + } +}; + +FunctionPass* createPostDomTree(); + +/// PostDominanceFrontier Class - Concrete subclass of DominanceFrontier that is +/// used to compute the a post-dominance frontier. +/// +struct PostDominanceFrontier : public DominanceFrontierBase { + static char ID; + PostDominanceFrontier() + : DominanceFrontierBase(&ID, true) {} + + virtual bool runOnFunction(Function &) { + Frontiers.clear(); + PostDominatorTree &DT = getAnalysis<PostDominatorTree>(); + Roots = DT.getRoots(); + if (const DomTreeNode *Root = DT.getRootNode()) + calculate(DT, Root); + return false; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + AU.addRequired<PostDominatorTree>(); + } + +private: + const DomSetType &calculate(const PostDominatorTree &DT, + const DomTreeNode *Node); +}; + +FunctionPass* createPostDomFrontier(); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/ProfileInfo.h b/include/llvm/Analysis/ProfileInfo.h new file mode 100644 index 0000000..ff83f97 --- /dev/null +++ b/include/llvm/Analysis/ProfileInfo.h @@ -0,0 +1,67 @@ +//===- llvm/Analysis/ProfileInfo.h - Profile Info Interface -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the generic ProfileInfo interface, which is used as the +// common interface used by all clients of profiling information, and +// implemented either by making static guestimations, or by actually reading in +// profiling information gathered by running the program. +// +// Note that to be useful, all profile-based optimizations should preserve +// ProfileInfo, which requires that they notify it when changes to the CFG are +// made. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_PROFILEINFO_H +#define LLVM_ANALYSIS_PROFILEINFO_H + +#include <string> +#include <map> + +namespace llvm { + class BasicBlock; + class Pass; + + /// ProfileInfo Class - This class holds and maintains edge profiling + /// information for some unit of code. + class ProfileInfo { + protected: + // EdgeCounts - Count the number of times a transition between two blocks is + // executed. As a special case, we also hold an edge from the null + // BasicBlock to the entry block to indicate how many times the function was + // entered. 
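As a concrete illustration of the convention just described, a client can read the implicit null-to-entry edge through getEdgeWeight (declared further down in this class); the helper name below is hypothetical.

#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Function.h"

// Sketch: the number of times F was entered, queried through the special edge
// whose source block is null.
static unsigned functionEntryCount(llvm::ProfileInfo &PI, llvm::Function &F) {
  return PI.getEdgeWeight(0, &F.getEntryBlock());
}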
+ std::map<std::pair<BasicBlock*, BasicBlock*>, unsigned> EdgeCounts; + public: + static char ID; // Class identification, replacement for typeinfo + virtual ~ProfileInfo(); // We want to be subclassed + + //===------------------------------------------------------------------===// + /// Profile Information Queries + /// + unsigned getExecutionCount(BasicBlock *BB) const; + + unsigned getEdgeWeight(BasicBlock *Src, BasicBlock *Dest) const { + std::map<std::pair<BasicBlock*, BasicBlock*>, unsigned>::const_iterator I= + EdgeCounts.find(std::make_pair(Src, Dest)); + return I != EdgeCounts.end() ? I->second : 0; + } + + //===------------------------------------------------------------------===// + /// Analysis Update Methods + /// + + }; + + /// createProfileLoaderPass - This function returns a Pass that loads the + /// profiling information for the module from the specified filename, making + /// it available to the optimizers. + Pass *createProfileLoaderPass(const std::string &Filename); +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/ProfileInfoLoader.h b/include/llvm/Analysis/ProfileInfoLoader.h new file mode 100644 index 0000000..8a5141a --- /dev/null +++ b/include/llvm/Analysis/ProfileInfoLoader.h @@ -0,0 +1,89 @@ +//===- ProfileInfoLoader.h - Load & convert profile information -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The ProfileInfoLoader class is used to load and represent profiling +// information read in from the dump file. If conversions between formats are +// needed, it can also do this. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_PROFILEINFOLOADER_H +#define LLVM_ANALYSIS_PROFILEINFOLOADER_H + +#include <vector> +#include <string> +#include <utility> + +namespace llvm { + +class Module; +class Function; +class BasicBlock; + +class ProfileInfoLoader { + Module &M; + std::vector<std::string> CommandLines; + std::vector<unsigned> FunctionCounts; + std::vector<unsigned> BlockCounts; + std::vector<unsigned> EdgeCounts; + std::vector<unsigned> BBTrace; +public: + // ProfileInfoLoader ctor - Read the specified profiling data file, exiting + // the program if the file is invalid or broken. + ProfileInfoLoader(const char *ToolName, const std::string &Filename, + Module &M); + + unsigned getNumExecutions() const { return CommandLines.size(); } + const std::string &getExecution(unsigned i) const { return CommandLines[i]; } + + // getFunctionCounts - This method is used by consumers of function counting + // information. If we do not directly have function count information, we + // compute it from other, more refined, types of profile information. + // + void getFunctionCounts(std::vector<std::pair<Function*, unsigned> > &Counts); + + // hasAccurateBlockCounts - Return true if we can synthesize accurate block + // frequency information from whatever we have. + // + bool hasAccurateBlockCounts() const { + return !BlockCounts.empty() || !EdgeCounts.empty(); + } + + // hasAccurateEdgeCounts - Return true if we can synthesize accurate edge + // frequency information from whatever we have. + // + bool hasAccurateEdgeCounts() const { + return !EdgeCounts.empty(); + } + + // getBlockCounts - This method is used by consumers of block counting + // information. 
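For orientation, here is a hedged sketch of end-to-end use of this loader: read a dump file and visit the per-function counts. The tool name, file name, threshold, and helper name are placeholders chosen for the example.

#include "llvm/Analysis/ProfileInfoLoader.h"
#include "llvm/Module.h"
#include <vector>
#include <utility>

static void reportHotFunctions(llvm::Module &M) {
  // Exits the program if the file is missing or malformed (see the ctor above).
  llvm::ProfileInfoLoader Loader("example-tool", "llvmprof.out", M);

  std::vector<std::pair<llvm::Function*, unsigned> > Counts;
  Loader.getFunctionCounts(Counts);
  for (unsigned i = 0, e = Counts.size(); i != e; ++i)
    if (Counts[i].second > 1000)          // arbitrary hotness threshold
      Counts[i].first->dump();            // dumps the whole function; debugging only
}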
If we do not directly have block count information, we + // compute it from other, more refined, types of profile information. + // + void getBlockCounts(std::vector<std::pair<BasicBlock*, unsigned> > &Counts); + + // getEdgeCounts - This method is used by consumers of edge counting + // information. If we do not directly have edge count information, we compute + // it from other, more refined, types of profile information. + // + // Edges are represented as a pair, where the first element is the basic block + // and the second element is the successor number. + // + typedef std::pair<BasicBlock*, unsigned> Edge; + void getEdgeCounts(std::vector<std::pair<Edge, unsigned> > &Counts); + + // getBBTrace - This method is used by consumers of basic-block trace + // information. + // + void getBBTrace(std::vector<BasicBlock *> &Trace); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/ProfileInfoTypes.h b/include/llvm/Analysis/ProfileInfoTypes.h new file mode 100644 index 0000000..f311f8c --- /dev/null +++ b/include/llvm/Analysis/ProfileInfoTypes.h @@ -0,0 +1,28 @@ +/*===-- ProfileInfoTypes.h - Profiling info shared constants ------*- C -*-===*\ +|* +|* The LLVM Compiler Infrastructure +|* +|* This file is distributed under the University of Illinois Open Source +|* License. See LICENSE.TXT for details. +|* +|*===----------------------------------------------------------------------===*| +|* +|* This file defines constants shared by the various different profiling +|* runtime libraries and the LLVM C++ profile info loader. It must be a +|* C header because, at present, the profiling runtimes are written in C. +|* +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_ANALYSIS_PROFILEINFOTYPES_H +#define LLVM_ANALYSIS_PROFILEINFOTYPES_H + +enum ProfilingType { + ArgumentInfo = 1, /* The command line argument block */ + FunctionInfo = 2, /* Function profiling information */ + BlockInfo = 3, /* Block profiling information */ + EdgeInfo = 4, /* Edge profiling information */ + PathInfo = 5, /* Path profiling information */ + BBTraceInfo = 6 /* Basic block trace information */ +}; + +#endif /* LLVM_ANALYSIS_PROFILEINFOTYPES_H */ diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h new file mode 100644 index 0000000..88002cb --- /dev/null +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -0,0 +1,546 @@ +//===- llvm/Analysis/ScalarEvolution.h - Scalar Evolution -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The ScalarEvolution class is an LLVM pass which can be used to analyze and +// catagorize scalar expressions in loops. It specializes in recognizing +// general induction variables, representing them with the abstract and opaque +// SCEV class. Given this analysis, trip counts of loops and other important +// properties can be obtained. +// +// This analysis is primarily useful for induction variable substitution and +// strength reduction. 
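A minimal sketch of the query pattern this overview describes, assuming the ScalarEvolution instance was obtained in the usual way (for example via getAnalysis<ScalarEvolution>() inside a pass); the helper name is illustrative only.

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Value.h"

static void describeScalar(llvm::ScalarEvolution &SE, llvm::Value *V) {
  if (!SE.isSCEVable(V->getType()))
    return;                      // e.g. values of a non-integer type
  llvm::SCEVHandle S = SE.getSCEV(V);
  S->dump();                     // closed-form expression, e.g. an add recurrence
}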
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H +#define LLVM_ANALYSIS_SCALAREVOLUTION_H + +#include "llvm/Pass.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/ValueHandle.h" +#include <iosfwd> + +namespace llvm { + class APInt; + class ConstantInt; + class Type; + class SCEVHandle; + class ScalarEvolution; + class TargetData; + + /// SCEV - This class represents an analyzed expression in the program. These + /// are reference-counted opaque objects that the client is not allowed to + /// do much with directly. + /// + class SCEV { + const unsigned SCEVType; // The SCEV baseclass this node corresponds to + mutable unsigned RefCount; + + friend class SCEVHandle; + void addRef() const { ++RefCount; } + void dropRef() const { + if (--RefCount == 0) + delete this; + } + + SCEV(const SCEV &); // DO NOT IMPLEMENT + void operator=(const SCEV &); // DO NOT IMPLEMENT + protected: + virtual ~SCEV(); + public: + explicit SCEV(unsigned SCEVTy) : SCEVType(SCEVTy), RefCount(0) {} + + unsigned getSCEVType() const { return SCEVType; } + + /// isLoopInvariant - Return true if the value of this SCEV is unchanging in + /// the specified loop. + virtual bool isLoopInvariant(const Loop *L) const = 0; + + /// hasComputableLoopEvolution - Return true if this SCEV changes value in a + /// known way in the specified loop. This property being true implies that + /// the value is variant in the loop AND that we can emit an expression to + /// compute the value of the expression at any particular loop iteration. + virtual bool hasComputableLoopEvolution(const Loop *L) const = 0; + + /// getType - Return the LLVM type of this SCEV expression. + /// + virtual const Type *getType() const = 0; + + /// isZero - Return true if the expression is a constant zero. + /// + bool isZero() const; + + /// isOne - Return true if the expression is a constant one. + /// + bool isOne() const; + + /// replaceSymbolicValuesWithConcrete - If this SCEV internally references + /// the symbolic value "Sym", construct and return a new SCEV that produces + /// the same value, but which uses the concrete value Conc instead of the + /// symbolic value. If this SCEV does not use the symbolic value, it + /// returns itself. + virtual SCEVHandle + replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const = 0; + + /// dominates - Return true if elements that makes up this SCEV dominates + /// the specified basic block. + virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const = 0; + + /// print - Print out the internal representation of this scalar to the + /// specified stream. This should really only be used for debugging + /// purposes. + virtual void print(raw_ostream &OS) const = 0; + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + + /// dump - This method is used for debugging. + /// + void dump() const; + }; + + inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) { + S.print(OS); + return OS; + } + + inline std::ostream &operator<<(std::ostream &OS, const SCEV &S) { + S.print(OS); + return OS; + } + + /// SCEVCouldNotCompute - An object of this class is returned by queries that + /// could not be answered. For example, if you ask for the number of + /// iterations of a linked-list traversal loop, you will get one of these. 
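In practice that means callers test for this marker before trusting a result, roughly as in the following sketch (the helper name is hypothetical):

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/Casting.h"

// Returns true if ScalarEvolution could actually compute a trip count for L.
static bool hasKnownTripCount(llvm::ScalarEvolution &SE, const llvm::Loop *L) {
  llvm::SCEVHandle Count = SE.getBackedgeTakenCount(L);
  return !llvm::isa<llvm::SCEVCouldNotCompute>(Count);
}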
+ /// None of the standard SCEV operations are valid on this class, it is just a + /// marker. + struct SCEVCouldNotCompute : public SCEV { + SCEVCouldNotCompute(); + ~SCEVCouldNotCompute(); + + // None of these methods are valid for this object. + virtual bool isLoopInvariant(const Loop *L) const; + virtual const Type *getType() const; + virtual bool hasComputableLoopEvolution(const Loop *L) const; + virtual void print(raw_ostream &OS) const; + virtual SCEVHandle + replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const; + + virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const { + return true; + } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVCouldNotCompute *S) { return true; } + static bool classof(const SCEV *S); + }; + + /// SCEVHandle - This class is used to maintain the SCEV object's refcounts, + /// freeing the objects when the last reference is dropped. + class SCEVHandle { + const SCEV *S; + SCEVHandle(); // DO NOT IMPLEMENT + public: + SCEVHandle(const SCEV *s) : S(s) { + assert(S && "Cannot create a handle to a null SCEV!"); + S->addRef(); + } + SCEVHandle(const SCEVHandle &RHS) : S(RHS.S) { + S->addRef(); + } + ~SCEVHandle() { S->dropRef(); } + + operator const SCEV*() const { return S; } + + const SCEV &operator*() const { return *S; } + const SCEV *operator->() const { return S; } + + bool operator==(const SCEV *RHS) const { return S == RHS; } + bool operator!=(const SCEV *RHS) const { return S != RHS; } + + const SCEVHandle &operator=(SCEV *RHS) { + if (S != RHS) { + S->dropRef(); + S = RHS; + S->addRef(); + } + return *this; + } + + const SCEVHandle &operator=(const SCEVHandle &RHS) { + if (S != RHS.S) { + S->dropRef(); + S = RHS.S; + S->addRef(); + } + return *this; + } + }; + + template<typename From> struct simplify_type; + template<> struct simplify_type<const SCEVHandle> { + typedef const SCEV* SimpleType; + static SimpleType getSimplifiedValue(const SCEVHandle &Node) { + return Node; + } + }; + template<> struct simplify_type<SCEVHandle> + : public simplify_type<const SCEVHandle> {}; + + /// ScalarEvolution - This class is the main scalar evolution driver. Because + /// client code (intentionally) can't do much with the SCEV objects directly, + /// they must ask this class for services. + /// + class ScalarEvolution : public FunctionPass { + /// SCEVCallbackVH - A CallbackVH to arrange for ScalarEvolution to be + /// notified whenever a Value is deleted. + class SCEVCallbackVH : public CallbackVH { + ScalarEvolution *SE; + virtual void deleted(); + virtual void allUsesReplacedWith(Value *New); + public: + SCEVCallbackVH(Value *V, ScalarEvolution *SE = 0); + }; + + friend class SCEVCallbackVH; + friend class SCEVExpander; + + /// F - The function we are analyzing. + /// + Function *F; + + /// LI - The loop information for the function we are currently analyzing. + /// + LoopInfo *LI; + + /// TD - The target data information for the target we are targetting. + /// + TargetData *TD; + + /// UnknownValue - This SCEV is used to represent unknown trip counts and + /// things. + SCEVHandle UnknownValue; + + /// Scalars - This is a cache of the scalars we have analyzed so far. + /// + std::map<SCEVCallbackVH, SCEVHandle> Scalars; + + /// BackedgeTakenInfo - Information about the backedge-taken count + /// of a loop. This currently inclues an exact count and a maximum count. 
+ /// + struct BackedgeTakenInfo { + /// Exact - An expression indicating the exact backedge-taken count of + /// the loop if it is known, or a SCEVCouldNotCompute otherwise. + SCEVHandle Exact; + + /// Exact - An expression indicating the least maximum backedge-taken + /// count of the loop that is known, or a SCEVCouldNotCompute. + SCEVHandle Max; + + /*implicit*/ BackedgeTakenInfo(SCEVHandle exact) : + Exact(exact), Max(exact) {} + + /*implicit*/ BackedgeTakenInfo(const SCEV *exact) : + Exact(exact), Max(exact) {} + + BackedgeTakenInfo(SCEVHandle exact, SCEVHandle max) : + Exact(exact), Max(max) {} + + /// hasAnyInfo - Test whether this BackedgeTakenInfo contains any + /// computed information, or whether it's all SCEVCouldNotCompute + /// values. + bool hasAnyInfo() const { + return !isa<SCEVCouldNotCompute>(Exact) || + !isa<SCEVCouldNotCompute>(Max); + } + }; + + /// BackedgeTakenCounts - Cache the backedge-taken count of the loops for + /// this function as they are computed. + std::map<const Loop*, BackedgeTakenInfo> BackedgeTakenCounts; + + /// ConstantEvolutionLoopExitValue - This map contains entries for all of + /// the PHI instructions that we attempt to compute constant evolutions for. + /// This allows us to avoid potentially expensive recomputation of these + /// properties. An instruction maps to null if we are unable to compute its + /// exit value. + std::map<PHINode*, Constant*> ConstantEvolutionLoopExitValue; + + /// ValuesAtScopes - This map contains entries for all the instructions + /// that we attempt to compute getSCEVAtScope information for without + /// using SCEV techniques, which can be expensive. + std::map<Instruction *, std::map<const Loop *, Constant *> > ValuesAtScopes; + + /// createSCEV - We know that there is no SCEV for the specified value. + /// Analyze the expression. + SCEVHandle createSCEV(Value *V); + + /// createNodeForPHI - Provide the special handling we need to analyze PHI + /// SCEVs. + SCEVHandle createNodeForPHI(PHINode *PN); + + /// createNodeForGEP - Provide the special handling we need to analyze GEP + /// SCEVs. + SCEVHandle createNodeForGEP(User *GEP); + + /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value + /// for the specified instruction and replaces any references to the + /// symbolic value SymName with the specified value. This is used during + /// PHI resolution. + void ReplaceSymbolicValueWithConcrete(Instruction *I, + const SCEVHandle &SymName, + const SCEVHandle &NewVal); + + /// getBackedgeTakenInfo - Return the BackedgeTakenInfo for the given + /// loop, lazily computing new values if the loop hasn't been analyzed + /// yet. + const BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L); + + /// ComputeBackedgeTakenCount - Compute the number of times the specified + /// loop will iterate. + BackedgeTakenInfo ComputeBackedgeTakenCount(const Loop *L); + + /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition + /// of 'icmp op load X, cst', try to see if we can compute the trip count. + SCEVHandle + ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, + Constant *RHS, + const Loop *L, + ICmpInst::Predicate p); + + /// ComputeBackedgeTakenCountExhaustively - If the trip is known to execute + /// a constant number of times (the condition evolves only from constants), + /// try to evaluate a few iterations of the loop until we get the exit + /// condition gets a value of ExitWhen (true or false). If we cannot + /// evaluate the trip count of the loop, return UnknownValue. 
+ SCEVHandle ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, + bool ExitWhen); + + /// HowFarToZero - Return the number of times a backedge comparing the + /// specified value to zero will execute. If not computable, return + /// UnknownValue. + SCEVHandle HowFarToZero(const SCEV *V, const Loop *L); + + /// HowFarToNonZero - Return the number of times a backedge checking the + /// specified value for nonzero will execute. If not computable, return + /// UnknownValue. + SCEVHandle HowFarToNonZero(const SCEV *V, const Loop *L); + + /// HowManyLessThans - Return the number of times a backedge containing the + /// specified less-than comparison will execute. If not computable, return + /// UnknownValue. isSigned specifies whether the less-than is signed. + BackedgeTakenInfo HowManyLessThans(const SCEV *LHS, const SCEV *RHS, + const Loop *L, bool isSigned); + + /// getLoopPredecessor - If the given loop's header has exactly one unique + /// predecessor outside the loop, return it. Otherwise return null. + BasicBlock *getLoopPredecessor(const Loop *L); + + /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB + /// (which may not be an immediate predecessor) which has exactly one + /// successor from which BB is reachable, or null if no such block is + /// found. + BasicBlock* getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB); + + /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is + /// in the header of its containing loop, we know the loop executes a + /// constant number of times, and the PHI node is just a recurrence + /// involving constants, fold it. + Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs, + const Loop *L); + + /// forgetLoopPHIs - Delete the memoized SCEVs associated with the + /// PHI nodes in the given loop. This is used when the trip count of + /// the loop may have changed. + void forgetLoopPHIs(const Loop *L); + + public: + static char ID; // Pass identification, replacement for typeid + ScalarEvolution(); + + /// isSCEVable - Test if values of the given type are analyzable within + /// the SCEV framework. This primarily includes integer types, and it + /// can optionally include pointer types if the ScalarEvolution class + /// has access to target-specific information. + bool isSCEVable(const Type *Ty) const; + + /// getTypeSizeInBits - Return the size in bits of the specified type, + /// for which isSCEVable must return true. + uint64_t getTypeSizeInBits(const Type *Ty) const; + + /// getEffectiveSCEVType - Return a type with the same bitwidth as + /// the given type and which represents how SCEV will treat the given + /// type, for which isSCEVable must return true. For pointer types, + /// this is the pointer-sized integer type. + const Type *getEffectiveSCEVType(const Type *Ty) const; + + /// getSCEV - Return a SCEV expression handle for the full generality of the + /// specified expression. 
+ SCEVHandle getSCEV(Value *V); + + SCEVHandle getConstant(ConstantInt *V); + SCEVHandle getConstant(const APInt& Val); + SCEVHandle getTruncateExpr(const SCEVHandle &Op, const Type *Ty); + SCEVHandle getZeroExtendExpr(const SCEVHandle &Op, const Type *Ty); + SCEVHandle getSignExtendExpr(const SCEVHandle &Op, const Type *Ty); + SCEVHandle getAddExpr(std::vector<SCEVHandle> &Ops); + SCEVHandle getAddExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) { + std::vector<SCEVHandle> Ops; + Ops.push_back(LHS); + Ops.push_back(RHS); + return getAddExpr(Ops); + } + SCEVHandle getAddExpr(const SCEVHandle &Op0, const SCEVHandle &Op1, + const SCEVHandle &Op2) { + std::vector<SCEVHandle> Ops; + Ops.push_back(Op0); + Ops.push_back(Op1); + Ops.push_back(Op2); + return getAddExpr(Ops); + } + SCEVHandle getMulExpr(std::vector<SCEVHandle> &Ops); + SCEVHandle getMulExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) { + std::vector<SCEVHandle> Ops; + Ops.push_back(LHS); + Ops.push_back(RHS); + return getMulExpr(Ops); + } + SCEVHandle getUDivExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); + SCEVHandle getAddRecExpr(const SCEVHandle &Start, const SCEVHandle &Step, + const Loop *L); + SCEVHandle getAddRecExpr(std::vector<SCEVHandle> &Operands, + const Loop *L); + SCEVHandle getAddRecExpr(const std::vector<SCEVHandle> &Operands, + const Loop *L) { + std::vector<SCEVHandle> NewOp(Operands); + return getAddRecExpr(NewOp, L); + } + SCEVHandle getSMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); + SCEVHandle getSMaxExpr(std::vector<SCEVHandle> Operands); + SCEVHandle getUMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); + SCEVHandle getUMaxExpr(std::vector<SCEVHandle> Operands); + SCEVHandle getUnknown(Value *V); + SCEVHandle getCouldNotCompute(); + + /// getNegativeSCEV - Return the SCEV object corresponding to -V. + /// + SCEVHandle getNegativeSCEV(const SCEVHandle &V); + + /// getNotSCEV - Return the SCEV object corresponding to ~V. + /// + SCEVHandle getNotSCEV(const SCEVHandle &V); + + /// getMinusSCEV - Return LHS-RHS. + /// + SCEVHandle getMinusSCEV(const SCEVHandle &LHS, + const SCEVHandle &RHS); + + /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion + /// of the input value to the specified type. If the type must be + /// extended, it is zero extended. + SCEVHandle getTruncateOrZeroExtend(const SCEVHandle &V, const Type *Ty); + + /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion + /// of the input value to the specified type. If the type must be + /// extended, it is sign extended. + SCEVHandle getTruncateOrSignExtend(const SCEVHandle &V, const Type *Ty); + + /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of + /// the input value to the specified type. If the type must be extended, + /// it is zero extended. The conversion must not be narrowing. + SCEVHandle getNoopOrZeroExtend(const SCEVHandle &V, const Type *Ty); + + /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of + /// the input value to the specified type. If the type must be extended, + /// it is sign extended. The conversion must not be narrowing. + SCEVHandle getNoopOrSignExtend(const SCEVHandle &V, const Type *Ty); + + /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the + /// input value to the specified type. The conversion must not be + /// widening. 
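The factory methods above compose freely. As a hedged sketch, the expression A + 2*B can be built from two values of the same integer type like this (the helper name is hypothetical; getIntegerSCEV is declared a little further down):

static llvm::SCEVHandle buildAPlusTwoB(llvm::ScalarEvolution &SE,
                                       llvm::Value *A, llvm::Value *B) {
  // Assumes A and B have the same SCEVable integer type.
  llvm::SCEVHandle SA  = SE.getSCEV(A);
  llvm::SCEVHandle SB  = SE.getSCEV(B);
  llvm::SCEVHandle Two = SE.getIntegerSCEV(2, SA->getType());
  return SE.getAddExpr(SA, SE.getMulExpr(Two, SB));   // A + 2*B
}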
+ SCEVHandle getTruncateOrNoop(const SCEVHandle &V, const Type *Ty); + + /// getIntegerSCEV - Given an integer or FP type, create a constant for the + /// specified signed integer value and return a SCEV for the constant. + SCEVHandle getIntegerSCEV(int Val, const Type *Ty); + + /// hasSCEV - Return true if the SCEV for this value has already been + /// computed. + bool hasSCEV(Value *V) const; + + /// setSCEV - Insert the specified SCEV into the map of current SCEVs for + /// the specified value. + void setSCEV(Value *V, const SCEVHandle &H); + + /// getSCEVAtScope - Return a SCEV expression handle for the specified value + /// at the specified scope in the program. The L value specifies a loop + /// nest to evaluate the expression at, where null is the top-level or a + /// specified loop is immediately inside of the loop. + /// + /// This method can be used to compute the exit value for a variable defined + /// in a loop by querying what the value will hold in the parent loop. + /// + /// In the case that a relevant loop exit value cannot be computed, the + /// original value V is returned. + SCEVHandle getSCEVAtScope(const SCEV *S, const Loop *L); + + /// getSCEVAtScope - This is a convenience function which does + /// getSCEVAtScope(getSCEV(V), L). + SCEVHandle getSCEVAtScope(Value *V, const Loop *L); + + /// isLoopGuardedByCond - Test whether entry to the loop is protected by + /// a conditional between LHS and RHS. This is used to help avoid max + /// expressions in loop trip counts. + bool isLoopGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, + const SCEV *LHS, const SCEV *RHS); + + /// getBackedgeTakenCount - If the specified loop has a predictable + /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute + /// object. The backedge-taken count is the number of times the loop header + /// will be branched to from within the loop. This is one less than the + /// trip count of the loop, since it doesn't count the first iteration, + /// when the header is branched to from outside the loop. + /// + /// Note that it is not valid to call this method on a loop without a + /// loop-invariant backedge-taken count (see + /// hasLoopInvariantBackedgeTakenCount). + /// + SCEVHandle getBackedgeTakenCount(const Loop *L); + + /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except + /// return the least SCEV value that is known never to be less than the + /// actual backedge taken count. + SCEVHandle getMaxBackedgeTakenCount(const Loop *L); + + /// hasLoopInvariantBackedgeTakenCount - Return true if the specified loop + /// has an analyzable loop-invariant backedge-taken count. + bool hasLoopInvariantBackedgeTakenCount(const Loop *L); + + /// forgetLoopBackedgeTakenCount - This method should be called by the + /// client when it has changed a loop in a way that may effect + /// ScalarEvolution's ability to compute a trip count, or if the loop + /// is deleted. 
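Putting the trip-count queries and this invalidation rule together, a transformation might follow the pattern below (a sketch only; the function name and the elided rewrite are placeholders):

static void transformLoop(llvm::ScalarEvolution &SE, llvm::Loop *L) {
  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return;                               // nothing predictable to work with
  llvm::SCEVHandle Count = SE.getBackedgeTakenCount(L);
  (void)Count;                            // would drive the transformation decision
  // ... rewrite the loop body or exit condition here ...
  SE.forgetLoopBackedgeTakenCount(L);     // the cached count is now stale
}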
+ void forgetLoopBackedgeTakenCount(const Loop *L); + + virtual bool runOnFunction(Function &F); + virtual void releaseMemory(); + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + void print(raw_ostream &OS, const Module* = 0) const; + virtual void print(std::ostream &OS, const Module* = 0) const; + void print(std::ostream *OS, const Module* M = 0) const { + if (OS) print(*OS, M); + } + }; +} + +#endif diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h new file mode 100644 index 0000000..7e0de47 --- /dev/null +++ b/include/llvm/Analysis/ScalarEvolutionExpander.h @@ -0,0 +1,147 @@ +//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the classes used to generate code from scalar expressions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H +#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H + +#include "llvm/Instructions.h" +#include "llvm/Type.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/ScalarEvolutionExpressions.h" + +namespace llvm { + /// SCEVExpander - This class uses information about analyze scalars to + /// rewrite expressions in canonical form. + /// + /// Clients should create an instance of this class when rewriting is needed, + /// and destroy it when finished to allow the release of the associated + /// memory. + struct SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> { + ScalarEvolution &SE; + std::map<SCEVHandle, AssertingVH<Value> > InsertedExpressions; + std::set<Value*> InsertedValues; + + BasicBlock::iterator InsertPt; + + friend struct SCEVVisitor<SCEVExpander, Value*>; + public: + explicit SCEVExpander(ScalarEvolution &se) + : SE(se) {} + + /// clear - Erase the contents of the InsertedExpressions map so that users + /// trying to expand the same expression into multiple BasicBlocks or + /// different places within the same BasicBlock can do so. + void clear() { InsertedExpressions.clear(); } + + /// isInsertedInstruction - Return true if the specified instruction was + /// inserted by the code rewriter. If so, the client should not modify the + /// instruction. + bool isInsertedInstruction(Instruction *I) const { + return InsertedValues.count(I); + } + + /// isInsertedExpression - Return true if the the code rewriter has a + /// Value* recorded for the given expression. + bool isInsertedExpression(const SCEV *S) const { + return InsertedExpressions.count(S); + } + + /// getOrInsertCanonicalInductionVariable - This method returns the + /// canonical induction variable of the specified type for the specified + /// loop (inserting one if there is none). A canonical induction variable + /// starts at zero and steps by one on each iteration. + Value *getOrInsertCanonicalInductionVariable(const Loop *L, const Type *Ty){ + assert(Ty->isInteger() && "Can only insert integer induction variables!"); + SCEVHandle H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty), + SE.getIntegerSCEV(1, Ty), L); + return expand(H); + } + + /// addInsertedValue - Remember the specified instruction as being the + /// canonical form for the specified SCEV. 
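Stepping back from the individual members, a typical client interaction with this expander is sketched below; emitSCEV is a hypothetical helper, and expandCodeFor is declared just after this point.

#include "llvm/Analysis/ScalarEvolutionExpander.h"

static llvm::Value *emitSCEV(llvm::ScalarEvolution &SE, llvm::SCEVHandle S,
                             llvm::BasicBlock::iterator IP) {
  llvm::SCEVExpander Expander(SE);
  // Materialize S as instructions immediately before IP, letting the expander
  // reuse casts and previously inserted expressions where it can.
  return Expander.expandCodeFor(S, S->getType(), IP);
}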
+ void addInsertedValue(Value *V, const SCEV *S) { + InsertedExpressions[S] = V; + InsertedValues.insert(V); + } + + void setInsertionPoint(BasicBlock::iterator NewIP) { InsertPt = NewIP; } + + BasicBlock::iterator getInsertionPoint() const { return InsertPt; } + + /// expandCodeFor - Insert code to directly compute the specified SCEV + /// expression into the program. The inserted code is inserted into the + /// SCEVExpander's current insertion point. If a type is specified, the + /// result will be expanded to have that type, with a cast if necessary. + Value *expandCodeFor(SCEVHandle SH, const Type *Ty = 0); + + /// expandCodeFor - Insert code to directly compute the specified SCEV + /// expression into the program. The inserted code is inserted into the + /// specified block. + Value *expandCodeFor(SCEVHandle SH, const Type *Ty, + BasicBlock::iterator IP) { + setInsertionPoint(IP); + return expandCodeFor(SH, Ty); + } + + /// InsertCastOfTo - Insert a cast of V to the specified type, doing what + /// we can to share the casts. + Value *InsertCastOfTo(Instruction::CastOps opcode, Value *V, + const Type *Ty); + + /// InsertNoopCastOfTo - Insert a cast of V to the specified type, + /// which must be possible with a noop cast. + Value *InsertNoopCastOfTo(Value *V, const Type *Ty); + + /// InsertBinop - Insert the specified binary operator, doing a small amount + /// of work to avoid inserting an obviously redundant operation. + Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, + Value *RHS, BasicBlock::iterator InsertPt); + + private: + /// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP + /// instead of using ptrtoint+arithmetic+inttoptr. + Value *expandAddToGEP(const SCEVHandle *op_begin, const SCEVHandle *op_end, + const PointerType *PTy, const Type *Ty, Value *V); + + Value *expand(const SCEV *S); + + Value *visitConstant(const SCEVConstant *S) { + return S->getValue(); + } + + Value *visitTruncateExpr(const SCEVTruncateExpr *S); + + Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S); + + Value *visitSignExtendExpr(const SCEVSignExtendExpr *S); + + Value *visitAddExpr(const SCEVAddExpr *S); + + Value *visitMulExpr(const SCEVMulExpr *S); + + Value *visitUDivExpr(const SCEVUDivExpr *S); + + Value *visitAddRecExpr(const SCEVAddRecExpr *S); + + Value *visitSMaxExpr(const SCEVSMaxExpr *S); + + Value *visitUMaxExpr(const SCEVUMaxExpr *S); + + Value *visitUnknown(const SCEVUnknown *S) { + return S->getValue(); + } + }; +} + +#endif + diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h new file mode 100644 index 0000000..264447e --- /dev/null +++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h @@ -0,0 +1,588 @@ +//===- llvm/Analysis/ScalarEvolutionExpressions.h - SCEV Exprs --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the classes used to represent and build scalar expressions. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_EXPRESSIONS_H +#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPRESSIONS_H + +#include "llvm/Analysis/ScalarEvolution.h" + +namespace llvm { + class ConstantInt; + class ConstantRange; + class APInt; + class DominatorTree; + + enum SCEVTypes { + // These should be ordered in terms of increasing complexity to make the + // folders simpler. + scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr, + scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr, scUnknown, + scCouldNotCompute + }; + + //===--------------------------------------------------------------------===// + /// SCEVConstant - This class represents a constant integer value. + /// + class SCEVConstant : public SCEV { + friend class ScalarEvolution; + + ConstantInt *V; + explicit SCEVConstant(ConstantInt *v) : SCEV(scConstant), V(v) {} + + virtual ~SCEVConstant(); + public: + ConstantInt *getValue() const { return V; } + + virtual bool isLoopInvariant(const Loop *L) const { + return true; + } + + virtual bool hasComputableLoopEvolution(const Loop *L) const { + return false; // Not loop variant + } + + virtual const Type *getType() const; + + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const { + return this; + } + + bool dominates(BasicBlock *BB, DominatorTree *DT) const { + return true; + } + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVConstant *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scConstant; + } + }; + + //===--------------------------------------------------------------------===// + /// SCEVCastExpr - This is the base class for unary cast operator classes. + /// + class SCEVCastExpr : public SCEV { + protected: + SCEVHandle Op; + const Type *Ty; + + SCEVCastExpr(unsigned SCEVTy, const SCEVHandle &op, const Type *ty); + virtual ~SCEVCastExpr(); + + public: + const SCEVHandle &getOperand() const { return Op; } + virtual const Type *getType() const { return Ty; } + + virtual bool isLoopInvariant(const Loop *L) const { + return Op->isLoopInvariant(L); + } + + virtual bool hasComputableLoopEvolution(const Loop *L) const { + return Op->hasComputableLoopEvolution(L); + } + + virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVCastExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scTruncate || + S->getSCEVType() == scZeroExtend || + S->getSCEVType() == scSignExtend; + } + }; + + //===--------------------------------------------------------------------===// + /// SCEVTruncateExpr - This class represents a truncation of an integer value + /// to a smaller integer value. 
+ /// + class SCEVTruncateExpr : public SCEVCastExpr { + friend class ScalarEvolution; + + SCEVTruncateExpr(const SCEVHandle &op, const Type *ty); + virtual ~SCEVTruncateExpr(); + + public: + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const { + SCEVHandle H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + if (H == Op) + return this; + return SE.getTruncateExpr(H, Ty); + } + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVTruncateExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scTruncate; + } + }; + + //===--------------------------------------------------------------------===// + /// SCEVZeroExtendExpr - This class represents a zero extension of a small + /// integer value to a larger integer value. + /// + class SCEVZeroExtendExpr : public SCEVCastExpr { + friend class ScalarEvolution; + + SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty); + virtual ~SCEVZeroExtendExpr(); + + public: + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const { + SCEVHandle H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + if (H == Op) + return this; + return SE.getZeroExtendExpr(H, Ty); + } + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVZeroExtendExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scZeroExtend; + } + }; + + //===--------------------------------------------------------------------===// + /// SCEVSignExtendExpr - This class represents a sign extension of a small + /// integer value to a larger integer value. + /// + class SCEVSignExtendExpr : public SCEVCastExpr { + friend class ScalarEvolution; + + SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty); + virtual ~SCEVSignExtendExpr(); + + public: + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const { + SCEVHandle H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + if (H == Op) + return this; + return SE.getSignExtendExpr(H, Ty); + } + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVSignExtendExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scSignExtend; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVNAryExpr - This node is a base class providing common + /// functionality for n'ary operators. 
+ /// + class SCEVNAryExpr : public SCEV { + protected: + std::vector<SCEVHandle> Operands; + + SCEVNAryExpr(enum SCEVTypes T, const std::vector<SCEVHandle> &ops) + : SCEV(T), Operands(ops) {} + virtual ~SCEVNAryExpr() {} + + public: + unsigned getNumOperands() const { return (unsigned)Operands.size(); } + const SCEVHandle &getOperand(unsigned i) const { + assert(i < Operands.size() && "Operand index out of range!"); + return Operands[i]; + } + + const std::vector<SCEVHandle> &getOperands() const { return Operands; } + typedef std::vector<SCEVHandle>::const_iterator op_iterator; + op_iterator op_begin() const { return Operands.begin(); } + op_iterator op_end() const { return Operands.end(); } + + virtual bool isLoopInvariant(const Loop *L) const { + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (!getOperand(i)->isLoopInvariant(L)) return false; + return true; + } + + // hasComputableLoopEvolution - N-ary expressions have computable loop + // evolutions iff they have at least one operand that varies with the loop, + // but that all varying operands are computable. + virtual bool hasComputableLoopEvolution(const Loop *L) const { + bool HasVarying = false; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (!getOperand(i)->isLoopInvariant(L)) { + if (getOperand(i)->hasComputableLoopEvolution(L)) + HasVarying = true; + else + return false; + } + return HasVarying; + } + + bool dominates(BasicBlock *BB, DominatorTree *DT) const; + + virtual const Type *getType() const { return getOperand(0)->getType(); } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVNAryExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scAddExpr || + S->getSCEVType() == scMulExpr || + S->getSCEVType() == scSMaxExpr || + S->getSCEVType() == scUMaxExpr || + S->getSCEVType() == scAddRecExpr; + } + }; + + //===--------------------------------------------------------------------===// + /// SCEVCommutativeExpr - This node is the base class for n'ary commutative + /// operators. + /// + class SCEVCommutativeExpr : public SCEVNAryExpr { + protected: + SCEVCommutativeExpr(enum SCEVTypes T, const std::vector<SCEVHandle> &ops) + : SCEVNAryExpr(T, ops) {} + ~SCEVCommutativeExpr(); + + public: + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const; + + virtual const char *getOperationStr() const = 0; + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVCommutativeExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scAddExpr || + S->getSCEVType() == scMulExpr || + S->getSCEVType() == scSMaxExpr || + S->getSCEVType() == scUMaxExpr; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVAddExpr - This node represents an addition of some number of SCEVs. 
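Since add nodes expose their operands through the SCEVNAryExpr interface above, a client can walk them directly; a small sketch with a hypothetical helper name follows.

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Support/Casting.h"

static void dumpAddOperands(const llvm::SCEV *S) {
  if (const llvm::SCEVAddExpr *Add = llvm::dyn_cast<llvm::SCEVAddExpr>(S))
    for (llvm::SCEVNAryExpr::op_iterator I = Add->op_begin(),
                                         E = Add->op_end(); I != E; ++I)
      (*I)->dump();   // each operand is itself a SCEV
}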
+ /// + class SCEVAddExpr : public SCEVCommutativeExpr { + friend class ScalarEvolution; + + explicit SCEVAddExpr(const std::vector<SCEVHandle> &ops) + : SCEVCommutativeExpr(scAddExpr, ops) { + } + + public: + virtual const char *getOperationStr() const { return " + "; } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVAddExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scAddExpr; + } + }; + + //===--------------------------------------------------------------------===// + /// SCEVMulExpr - This node represents multiplication of some number of SCEVs. + /// + class SCEVMulExpr : public SCEVCommutativeExpr { + friend class ScalarEvolution; + + explicit SCEVMulExpr(const std::vector<SCEVHandle> &ops) + : SCEVCommutativeExpr(scMulExpr, ops) { + } + + public: + virtual const char *getOperationStr() const { return " * "; } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVMulExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scMulExpr; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVUDivExpr - This class represents a binary unsigned division operation. + /// + class SCEVUDivExpr : public SCEV { + friend class ScalarEvolution; + + SCEVHandle LHS, RHS; + SCEVUDivExpr(const SCEVHandle &lhs, const SCEVHandle &rhs) + : SCEV(scUDivExpr), LHS(lhs), RHS(rhs) {} + + virtual ~SCEVUDivExpr(); + public: + const SCEVHandle &getLHS() const { return LHS; } + const SCEVHandle &getRHS() const { return RHS; } + + virtual bool isLoopInvariant(const Loop *L) const { + return LHS->isLoopInvariant(L) && RHS->isLoopInvariant(L); + } + + virtual bool hasComputableLoopEvolution(const Loop *L) const { + return LHS->hasComputableLoopEvolution(L) && + RHS->hasComputableLoopEvolution(L); + } + + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const { + SCEVHandle L = LHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + SCEVHandle R = RHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + if (L == LHS && R == RHS) + return this; + else + return SE.getUDivExpr(L, R); + } + + bool dominates(BasicBlock *BB, DominatorTree *DT) const; + + virtual const Type *getType() const; + + void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVUDivExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scUDivExpr; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVAddRecExpr - This node represents a polynomial recurrence on the trip + /// count of the specified loop. This is the primary focus of the + /// ScalarEvolution framework; all the other SCEV subclasses are mostly just + /// supporting infrastructure to allow SCEVAddRecExpr expressions to be + /// created and analyzed. + /// + /// All operands of an AddRec are required to be loop invariant. 
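For a feel of how this node is consumed, the sketch below pulls the start and step out of an affine recurrence (the helper name is hypothetical; getStart, getStepRecurrence, and isAffine are declared in the class that follows).

static void printAffineParts(llvm::ScalarEvolution &SE, const llvm::SCEV *S) {
  if (const llvm::SCEVAddRecExpr *AR = llvm::dyn_cast<llvm::SCEVAddRecExpr>(S))
    if (AR->isAffine()) {                  // {Start,+,Step} form
      AR->getStart()->dump();              // value on the first iteration
      AR->getStepRecurrence(SE)->dump();   // amount added on each iteration
    }
}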
+ /// + class SCEVAddRecExpr : public SCEVNAryExpr { + friend class ScalarEvolution; + + const Loop *L; + + SCEVAddRecExpr(const std::vector<SCEVHandle> &ops, const Loop *l) + : SCEVNAryExpr(scAddRecExpr, ops), L(l) { + for (size_t i = 0, e = Operands.size(); i != e; ++i) + assert(Operands[i]->isLoopInvariant(l) && + "Operands of AddRec must be loop-invariant!"); + } + ~SCEVAddRecExpr(); + + public: + const SCEVHandle &getStart() const { return Operands[0]; } + const Loop *getLoop() const { return L; } + + /// getStepRecurrence - This method constructs and returns the recurrence + /// indicating how much this expression steps by. If this is a polynomial + /// of degree N, it returns a chrec of degree N-1. + SCEVHandle getStepRecurrence(ScalarEvolution &SE) const { + if (isAffine()) return getOperand(1); + return SE.getAddRecExpr(std::vector<SCEVHandle>(op_begin()+1,op_end()), + getLoop()); + } + + virtual bool hasComputableLoopEvolution(const Loop *QL) const { + if (L == QL) return true; + return false; + } + + virtual bool isLoopInvariant(const Loop *QueryLoop) const; + + /// isAffine - Return true if this is an affine AddRec (i.e., it represents + /// an expressions A+B*x where A and B are loop invariant values. + bool isAffine() const { + // We know that the start value is invariant. This expression is thus + // affine iff the step is also invariant. + return getNumOperands() == 2; + } + + /// isQuadratic - Return true if this is an quadratic AddRec (i.e., it + /// represents an expressions A+B*x+C*x^2 where A, B and C are loop + /// invariant values. This corresponds to an addrec of the form {L,+,M,+,N} + bool isQuadratic() const { + return getNumOperands() == 3; + } + + /// evaluateAtIteration - Return the value of this chain of recurrences at + /// the specified iteration number. + SCEVHandle evaluateAtIteration(SCEVHandle It, ScalarEvolution &SE) const; + + /// getNumIterationsInRange - Return the number of iterations of this loop + /// that produce values in the specified constant range. Another way of + /// looking at this is that it returns the first iteration number where the + /// value is not in the condition, thus computing the exit count. If the + /// iteration count can't be computed, an instance of SCEVCouldNotCompute is + /// returned. + SCEVHandle getNumIterationsInRange(ConstantRange Range, + ScalarEvolution &SE) const; + + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const; + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVAddRecExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scAddRecExpr; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVSMaxExpr - This class represents a signed maximum selection. 
+ /// + class SCEVSMaxExpr : public SCEVCommutativeExpr { + friend class ScalarEvolution; + + explicit SCEVSMaxExpr(const std::vector<SCEVHandle> &ops) + : SCEVCommutativeExpr(scSMaxExpr, ops) { + } + + public: + virtual const char *getOperationStr() const { return " smax "; } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVSMaxExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scSMaxExpr; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVUMaxExpr - This class represents an unsigned maximum selection. + /// + class SCEVUMaxExpr : public SCEVCommutativeExpr { + friend class ScalarEvolution; + + explicit SCEVUMaxExpr(const std::vector<SCEVHandle> &ops) + : SCEVCommutativeExpr(scUMaxExpr, ops) { + } + + public: + virtual const char *getOperationStr() const { return " umax "; } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVUMaxExpr *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scUMaxExpr; + } + }; + + + //===--------------------------------------------------------------------===// + /// SCEVUnknown - This means that we are dealing with an entirely unknown SCEV + /// value, and only represent it as it's LLVM Value. This is the "bottom" + /// value for the analysis. + /// + class SCEVUnknown : public SCEV { + friend class ScalarEvolution; + + Value *V; + explicit SCEVUnknown(Value *v) : SCEV(scUnknown), V(v) {} + + protected: + ~SCEVUnknown(); + public: + Value *getValue() const { return V; } + + virtual bool isLoopInvariant(const Loop *L) const; + virtual bool hasComputableLoopEvolution(const Loop *QL) const { + return false; // not computable + } + + SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, + const SCEVHandle &Conc, + ScalarEvolution &SE) const { + if (&*Sym == this) return Conc; + return this; + } + + bool dominates(BasicBlock *BB, DominatorTree *DT) const; + + virtual const Type *getType() const; + + virtual void print(raw_ostream &OS) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SCEVUnknown *S) { return true; } + static inline bool classof(const SCEV *S) { + return S->getSCEVType() == scUnknown; + } + }; + + /// SCEVVisitor - This class defines a simple visitor class that may be used + /// for various SCEV analysis purposes. 
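As an illustration of the template defined just below, here is a hedged sketch of a client visitor that counts SCEVUnknown leaves in an expression tree; the class and its logic are purely hypothetical.

#include "llvm/Analysis/ScalarEvolutionExpressions.h"

struct UnknownCounter : public llvm::SCEVVisitor<UnknownCounter, unsigned> {
  // Helper: sum the counts of all operands of an n-ary node.
  unsigned countOps(const llvm::SCEVNAryExpr *S) {
    unsigned N = 0;
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      N += visit(S->getOperand(i));
    return N;
  }
  unsigned visitConstant(const llvm::SCEVConstant *S)             { return 0; }
  unsigned visitTruncateExpr(const llvm::SCEVTruncateExpr *S)     { return visit(S->getOperand()); }
  unsigned visitZeroExtendExpr(const llvm::SCEVZeroExtendExpr *S) { return visit(S->getOperand()); }
  unsigned visitSignExtendExpr(const llvm::SCEVSignExtendExpr *S) { return visit(S->getOperand()); }
  unsigned visitAddExpr(const llvm::SCEVAddExpr *S)               { return countOps(S); }
  unsigned visitMulExpr(const llvm::SCEVMulExpr *S)               { return countOps(S); }
  unsigned visitUDivExpr(const llvm::SCEVUDivExpr *S) {
    return visit(S->getLHS()) + visit(S->getRHS());
  }
  unsigned visitAddRecExpr(const llvm::SCEVAddRecExpr *S)         { return countOps(S); }
  unsigned visitSMaxExpr(const llvm::SCEVSMaxExpr *S)             { return countOps(S); }
  unsigned visitUMaxExpr(const llvm::SCEVUMaxExpr *S)             { return countOps(S); }
  unsigned visitUnknown(const llvm::SCEVUnknown *S)               { return 1; }
};
// Usage: unsigned NumLeaves = UnknownCounter().visit(SomeSCEV);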
+ template<typename SC, typename RetVal=void> + struct SCEVVisitor { + RetVal visit(const SCEV *S) { + switch (S->getSCEVType()) { + case scConstant: + return ((SC*)this)->visitConstant((const SCEVConstant*)S); + case scTruncate: + return ((SC*)this)->visitTruncateExpr((const SCEVTruncateExpr*)S); + case scZeroExtend: + return ((SC*)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr*)S); + case scSignExtend: + return ((SC*)this)->visitSignExtendExpr((const SCEVSignExtendExpr*)S); + case scAddExpr: + return ((SC*)this)->visitAddExpr((const SCEVAddExpr*)S); + case scMulExpr: + return ((SC*)this)->visitMulExpr((const SCEVMulExpr*)S); + case scUDivExpr: + return ((SC*)this)->visitUDivExpr((const SCEVUDivExpr*)S); + case scAddRecExpr: + return ((SC*)this)->visitAddRecExpr((const SCEVAddRecExpr*)S); + case scSMaxExpr: + return ((SC*)this)->visitSMaxExpr((const SCEVSMaxExpr*)S); + case scUMaxExpr: + return ((SC*)this)->visitUMaxExpr((const SCEVUMaxExpr*)S); + case scUnknown: + return ((SC*)this)->visitUnknown((const SCEVUnknown*)S); + case scCouldNotCompute: + return ((SC*)this)->visitCouldNotCompute((const SCEVCouldNotCompute*)S); + default: + assert(0 && "Unknown SCEV type!"); + abort(); + } + } + + RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) { + assert(0 && "Invalid use of SCEVCouldNotCompute!"); + abort(); + return RetVal(); + } + }; +} + +#endif diff --git a/include/llvm/Analysis/SparsePropagation.h b/include/llvm/Analysis/SparsePropagation.h new file mode 100644 index 0000000..c75531a --- /dev/null +++ b/include/llvm/Analysis/SparsePropagation.h @@ -0,0 +1,200 @@ +//===- SparsePropagation.h - Sparse Conditional Property Propagation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements an abstract sparse conditional propagation algorithm, +// modeled after SCCP, but with a customizable lattice function. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_SPARSE_PROPAGATION_H +#define LLVM_ANALYSIS_SPARSE_PROPAGATION_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include <iosfwd> +#include <vector> +#include <set> + +namespace llvm { + class Value; + class Constant; + class Argument; + class Instruction; + class PHINode; + class TerminatorInst; + class BasicBlock; + class Function; + class SparseSolver; + + template<typename T> class SmallVectorImpl; + +/// AbstractLatticeFunction - This class is implemented by the dataflow instance +/// to specify what the lattice values are and how they handle merges etc. +/// This gives the client the power to compute lattice values from instructions, +/// constants, etc. The requirement is that lattice values must all fit into +/// a void*. If a void* is not sufficient, the implementation should use this +/// pointer to be a pointer into a uniquing set or something. 
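Concretely, the smallest usable lattice function might look like the sketch below: three file-scope tokens whose distinct addresses serve as the undefined, overdefined, and untracked values, with every other hook left at the conservative defaults the base class provides. All names are hypothetical.

#include "llvm/Analysis/SparsePropagation.h"

static char UndefTok, OverdefTok, UntrackedTok;   // addresses used as lattice values

class TrivialLatticeFunc : public llvm::AbstractLatticeFunction {
public:
  TrivialLatticeFunc()
    : AbstractLatticeFunction(&UndefTok, &OverdefTok, &UntrackedTok) {}
  // Real clients would additionally override ComputeConstant, MergeValues,
  // and ComputeInstructionState (see the class below) to model their domain.
};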
+/// +class AbstractLatticeFunction { +public: + typedef void *LatticeVal; +private: + LatticeVal UndefVal, OverdefinedVal, UntrackedVal; +public: + AbstractLatticeFunction(LatticeVal undefVal, LatticeVal overdefinedVal, + LatticeVal untrackedVal) { + UndefVal = undefVal; + OverdefinedVal = overdefinedVal; + UntrackedVal = untrackedVal; + } + virtual ~AbstractLatticeFunction(); + + LatticeVal getUndefVal() const { return UndefVal; } + LatticeVal getOverdefinedVal() const { return OverdefinedVal; } + LatticeVal getUntrackedVal() const { return UntrackedVal; } + + /// IsUntrackedValue - If the specified Value is something that is obviously + /// uninteresting to the analysis (and would always return UntrackedVal), + /// this function can return true to avoid pointless work. + virtual bool IsUntrackedValue(Value *V) { + return false; + } + + /// ComputeConstant - Given a constant value, compute and return a lattice + /// value corresponding to the specified constant. + virtual LatticeVal ComputeConstant(Constant *C) { + return getOverdefinedVal(); // always safe + } + + /// GetConstant - If the specified lattice value is representable as an LLVM + /// constant value, return it. Otherwise return null. The returned value + /// must be in the same LLVM type as Val. + virtual Constant *GetConstant(LatticeVal LV, Value *Val, SparseSolver &SS) { + return 0; + } + + /// ComputeArgument - Given a formal argument value, compute and return a + /// lattice value corresponding to the specified argument. + virtual LatticeVal ComputeArgument(Argument *I) { + return getOverdefinedVal(); // always safe + } + + /// MergeValues - Compute and return the merge of the two specified lattice + /// values. Merging should only move one direction down the lattice to + /// guarantee convergence (toward overdefined). + virtual LatticeVal MergeValues(LatticeVal X, LatticeVal Y) { + return getOverdefinedVal(); // always safe, never useful. + } + + /// ComputeInstructionState - Given an instruction and a vector of its operand + /// values, compute the result value of the instruction. + virtual LatticeVal ComputeInstructionState(Instruction &I, SparseSolver &SS) { + return getOverdefinedVal(); // always safe, never useful. + } + + /// PrintValue - Render the specified lattice value to the specified stream. + virtual void PrintValue(LatticeVal V, std::ostream &OS); +}; + + +/// SparseSolver - This class is a general purpose solver for Sparse Conditional +/// Propagation with a programmable lattice function. +/// +class SparseSolver { + typedef AbstractLatticeFunction::LatticeVal LatticeVal; + + /// LatticeFunc - This is the object that knows the lattice and how to do + /// compute transfer functions. + AbstractLatticeFunction *LatticeFunc; + + DenseMap<Value*, LatticeVal> ValueState; // The state each value is in. + SmallPtrSet<BasicBlock*, 16> BBExecutable; // The bbs that are executable. + + std::vector<Instruction*> InstWorkList; // Worklist of insts to process. + + std::vector<BasicBlock*> BBWorkList; // The BasicBlock work list + + /// KnownFeasibleEdges - Entries in this set are edges which have already had + /// PHI nodes retriggered. 
+ typedef std::pair<BasicBlock*,BasicBlock*> Edge; + std::set<Edge> KnownFeasibleEdges; + + SparseSolver(const SparseSolver&); // DO NOT IMPLEMENT + void operator=(const SparseSolver&); // DO NOT IMPLEMENT +public: + explicit SparseSolver(AbstractLatticeFunction *Lattice) + : LatticeFunc(Lattice) {} + ~SparseSolver() { + delete LatticeFunc; + } + + /// Solve - Solve for constants and executable blocks. + /// + void Solve(Function &F); + + void Print(Function &F, std::ostream &OS) const; + + /// getLatticeState - Return the LatticeVal object that corresponds to the + /// value. If an value is not in the map, it is returned as untracked, + /// unlike the getOrInitValueState method. + LatticeVal getLatticeState(Value *V) const { + DenseMap<Value*, LatticeVal>::iterator I = ValueState.find(V); + return I != ValueState.end() ? I->second : LatticeFunc->getUntrackedVal(); + } + + /// getOrInitValueState - Return the LatticeVal object that corresponds to the + /// value, initializing the value's state if it hasn't been entered into the + /// map yet. This function is necessary because not all values should start + /// out in the underdefined state... Arguments should be overdefined, and + /// constants should be marked as constants. + /// + LatticeVal getOrInitValueState(Value *V); + + /// isEdgeFeasible - Return true if the control flow edge from the 'From' + /// basic block to the 'To' basic block is currently feasible. If + /// AggressiveUndef is true, then this treats values with unknown lattice + /// values as undefined. This is generally only useful when solving the + /// lattice, not when querying it. + bool isEdgeFeasible(BasicBlock *From, BasicBlock *To, + bool AggressiveUndef = false); + + /// isBlockExecutable - Return true if there are any known feasible + /// edges into the basic block. This is generally only useful when + /// querying the lattice. + bool isBlockExecutable(BasicBlock *BB) const { + return BBExecutable.count(BB); + } + +private: + /// UpdateState - When the state for some instruction is potentially updated, + /// this function notices and adds I to the worklist if needed. + void UpdateState(Instruction &Inst, LatticeVal V); + + /// MarkBlockExecutable - This method can be used by clients to mark all of + /// the blocks that are known to be intrinsically live in the processed unit. + void MarkBlockExecutable(BasicBlock *BB); + + /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB + /// work list if it is not already executable. + void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest); + + /// getFeasibleSuccessors - Return a vector of booleans to indicate which + /// successors are reachable from a given terminator instruction. + void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs, + bool AggressiveUndef); + + void visitInst(Instruction &I); + void visitPHINode(PHINode &I); + void visitTerminatorInst(TerminatorInst &TI); + +}; + +} // end namespace llvm + +#endif // LLVM_ANALYSIS_SPARSE_PROPAGATION_H diff --git a/include/llvm/Analysis/Trace.h b/include/llvm/Analysis/Trace.h new file mode 100644 index 0000000..fd615fc --- /dev/null +++ b/include/llvm/Analysis/Trace.h @@ -0,0 +1,120 @@ +//===- llvm/Analysis/Trace.h - Represent one trace of LLVM code -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
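
// A minimal sketch of plugging a custom lattice into the SparseSolver
// declared in SparsePropagation.h above. LatticeVal is just a void*, so
// three distinct tag addresses serve as the required sentinel values.
// TrivialLattice and AnalyzeReachability are hypothetical names; every
// transfer function is left at its conservative default (overdefined), so
// this only exercises CFG reachability.

#include "llvm/Analysis/SparsePropagation.h"
#include "llvm/Function.h"
using namespace llvm;

namespace {
  char UndefTag, OverdefTag, UntrackedTag;    // three distinct sentinels

  struct TrivialLattice : public AbstractLatticeFunction {
    TrivialLattice()
      : AbstractLatticeFunction(&UndefTag, &OverdefTag, &UntrackedTag) {}
  };
}

static void AnalyzeReachability(Function &F) {
  SparseSolver Solver(new TrivialLattice());   // the solver deletes it
  Solver.Solve(F);
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (!Solver.isBlockExecutable(&*BB))
      ;  // BB was never proven reachable under this lattice
}
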
+// +//===----------------------------------------------------------------------===// +// +// This class represents a single trace of LLVM basic blocks. A trace is a +// single entry, multiple exit, region of code that is often hot. Trace-based +// optimizations treat traces almost like they are a large, strange, basic +// block: because the trace path is assumed to be hot, optimizations for the +// fall-through path are made at the expense of the non-fall-through paths. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_TRACE_H +#define LLVM_ANALYSIS_TRACE_H + +#include "llvm/Support/Streams.h" +#include <vector> +#include <cassert> + +namespace llvm { + class BasicBlock; + class Function; + class Module; + +class Trace { + typedef std::vector<BasicBlock *> BasicBlockListType; + BasicBlockListType BasicBlocks; + +public: + /// Trace ctor - Make a new trace from a vector of basic blocks, + /// residing in the function which is the parent of the first + /// basic block in the vector. + /// + Trace(const std::vector<BasicBlock *> &vBB) : BasicBlocks (vBB) {} + + /// getEntryBasicBlock - Return the entry basic block (first block) + /// of the trace. + /// + BasicBlock *getEntryBasicBlock () const { return BasicBlocks[0]; } + + /// operator[]/getBlock - Return basic block N in the trace. + /// + BasicBlock *operator[](unsigned i) const { return BasicBlocks[i]; } + BasicBlock *getBlock(unsigned i) const { return BasicBlocks[i]; } + + /// getFunction - Return this trace's parent function. + /// + Function *getFunction () const; + + /// getModule - Return this Module that contains this trace's parent + /// function. + /// + Module *getModule () const; + + /// getBlockIndex - Return the index of the specified basic block in the + /// trace, or -1 if it is not in the trace. + int getBlockIndex(const BasicBlock *X) const { + for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i) + if (BasicBlocks[i] == X) + return i; + return -1; + } + + /// contains - Returns true if this trace contains the given basic + /// block. + /// + bool contains(const BasicBlock *X) const { + return getBlockIndex(X) != -1; + } + + /// Returns true if B1 occurs before B2 in the trace, or if it is the same + /// block as B2.. Both blocks must be in the trace. + /// + bool dominates(const BasicBlock *B1, const BasicBlock *B2) const { + int B1Idx = getBlockIndex(B1), B2Idx = getBlockIndex(B2); + assert(B1Idx != -1 && B2Idx != -1 && "Block is not in the trace!"); + return B1Idx <= B2Idx; + } + + // BasicBlock iterators... 
+ typedef BasicBlockListType::iterator iterator; + typedef BasicBlockListType::const_iterator const_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + + iterator begin() { return BasicBlocks.begin(); } + const_iterator begin() const { return BasicBlocks.begin(); } + iterator end () { return BasicBlocks.end(); } + const_iterator end () const { return BasicBlocks.end(); } + + reverse_iterator rbegin() { return BasicBlocks.rbegin(); } + const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); } + reverse_iterator rend () { return BasicBlocks.rend(); } + const_reverse_iterator rend () const { return BasicBlocks.rend(); } + + unsigned size() const { return BasicBlocks.size(); } + bool empty() const { return BasicBlocks.empty(); } + + iterator erase(iterator q) { return BasicBlocks.erase (q); } + iterator erase(iterator q1, iterator q2) { return BasicBlocks.erase (q1, q2); } + + /// print - Write trace to output stream. + /// + void print (std::ostream &O) const; + void print (std::ostream *O) const { if (O) print(*O); } + + /// dump - Debugger convenience method; writes trace to standard error + /// output stream. + /// + void dump () const; +}; + +} // end namespace llvm + +#endif // TRACE_H diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h new file mode 100644 index 0000000..5f5f77a --- /dev/null +++ b/include/llvm/Analysis/ValueTracking.h @@ -0,0 +1,87 @@ +//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains routines that help analyze properties that chains of +// computations have. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_VALUETRACKING_H +#define LLVM_ANALYSIS_VALUETRACKING_H + +#include "llvm/Support/DataTypes.h" +#include <string> + +namespace llvm { + class Value; + class Instruction; + class APInt; + class TargetData; + + /// ComputeMaskedBits - Determine which of the bits specified in Mask are + /// known to be either zero or one and return them in the KnownZero/KnownOne + /// bit sets. This code only analyzes bits in Mask, in order to short-circuit + /// processing. + void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero, + APInt &KnownOne, TargetData *TD = 0, + unsigned Depth = 0); + + /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use + /// this predicate to simplify operations downstream. Mask is known to be + /// zero for bits that V cannot have. + bool MaskedValueIsZero(Value *V, const APInt &Mask, + TargetData *TD = 0, unsigned Depth = 0); + + + /// ComputeNumSignBits - Return the number of times the sign bit of the + /// register is replicated into the other bits. We know that at least 1 bit + /// is always equal to the sign bit (itself), but other cases can give us + /// information. For example, immediately after an "ashr X, 2", we know that + /// the top 3 bits are all equal to each other, so we return 3. + /// + /// 'Op' must have a scalar integer type. 
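
// A short sketch of building and querying a Trace (llvm/Analysis/Trace.h,
// above). DescribeTrace is hypothetical; a real client would collect the
// hot block sequence from profile information.

#include "llvm/Analysis/Trace.h"
#include <vector>
using namespace llvm;

static void DescribeTrace(const std::vector<BasicBlock*> &HotPath) {
  Trace T(HotPath);                              // entry block is HotPath[0]
  BasicBlock *Entry = T.getEntryBasicBlock();
  if (T.size() > 1)
    (void)T.dominates(Entry, T.getBlock(1));     // true: entry comes first
  for (unsigned i = 0, e = T.size(); i != e; ++i)
    (void)T[i];                                  // blocks in trace order
  T.dump();                                      // print to standard error
}
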
+ /// + unsigned ComputeNumSignBits(Value *Op, TargetData *TD = 0, + unsigned Depth = 0); + + /// CannotBeNegativeZero - Return true if we can prove that the specified FP + /// value is never equal to -0.0. + /// + bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0); + + /// FindScalarValue - Given an aggregrate and an sequence of indices, see if + /// the scalar value indexed is already around as a register, for example if + /// it were inserted directly into the aggregrate. + /// + /// If InsertBefore is not null, this function will duplicate (modified) + /// insertvalues when a part of a nested struct is extracted. + Value *FindInsertedValue(Value *V, + const unsigned *idx_begin, + const unsigned *idx_end, + Instruction *InsertBefore = 0); + + /// This is a convenience wrapper for finding values indexed by a single index + /// only. + inline Value *FindInsertedValue(Value *V, const unsigned Idx, + Instruction *InsertBefore = 0) { + const unsigned Idxs[1] = { Idx }; + return FindInsertedValue(V, &Idxs[0], &Idxs[1], InsertBefore); + } + + /// GetConstantStringInfo - This function computes the length of a + /// null-terminated C string pointed to by V. If successful, it returns true + /// and returns the string in Str. If unsuccessful, it returns false. If + /// StopAtNul is set to true (the default), the returned string is truncated + /// by a nul character in the global. If StopAtNul is false, the nul + /// character is included in the result string. + bool GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset = 0, + bool StopAtNul = true); +} // end namespace llvm + +#endif diff --git a/include/llvm/Analysis/Verifier.h b/include/llvm/Analysis/Verifier.h new file mode 100644 index 0000000..a6b2a6d --- /dev/null +++ b/include/llvm/Analysis/Verifier.h @@ -0,0 +1,75 @@ +//===-- llvm/Analysis/Verifier.h - Module Verifier --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the function verifier interface, that can be used for some +// sanity checking of input to the system, and for checking that transformations +// haven't done something bad. +// +// Note that this does not provide full 'java style' security and verifications, +// instead it just tries to ensure that code is well formed. +// +// To see what specifically is checked, look at the top of Verifier.cpp +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_VERIFIER_H +#define LLVM_ANALYSIS_VERIFIER_H + +#include <string> + +namespace llvm { + +class FunctionPass; +class Module; +class Function; + +/// @brief An enumeration to specify the action to be taken if errors found. +/// +/// This enumeration is used in the functions below to indicate what should +/// happen if the verifier finds errors. Each of the functions that uses +/// this enumeration as an argument provides a default value for it. The +/// actions are listed below. +enum VerifierFailureAction { + AbortProcessAction, ///< verifyModule will print to stderr and abort() + PrintMessageAction, ///< verifyModule will print to stderr and return true + ReturnStatusAction ///< verifyModule will just return true +}; + +/// @brief Create a verifier pass. +/// +/// Check a module or function for validity. 
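
// A brief sketch of the known-bits queries declared above. The value V is
// assumed to have integer type of width BitWidth; TargetData is optional
// and omitted here. LowBitKnownZero is a hypothetical helper.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APInt.h"
using namespace llvm;

static bool LowBitKnownZero(Value *V, unsigned BitWidth) {
  APInt Mask = APInt::getLowBitsSet(BitWidth, 1);      // just bit 0
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  return KnownZero[0];          // equivalently: MaskedValueIsZero(V, Mask)
}
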
When the pass is used, the +/// action indicated by the \p action argument will be used if errors are +/// found. +FunctionPass *createVerifierPass( + VerifierFailureAction action = AbortProcessAction ///< Action to take +); + +/// @brief Check a module for errors. +/// +/// If there are no errors, the function returns false. If an error is found, +/// the action taken depends on the \p action parameter. +/// This should only be used for debugging, because it plays games with +/// PassManagers and stuff. + +bool verifyModule( + const Module &M, ///< The module to be verified + VerifierFailureAction action = AbortProcessAction, ///< Action to take + std::string *ErrorInfo = 0 ///< Information about failures. +); + +// verifyFunction - Check a function for errors, useful for use when debugging a +// pass. +bool verifyFunction( + const Function &F, ///< The function to be verified + VerifierFailureAction action = AbortProcessAction ///< Action to take +); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Argument.h b/include/llvm/Argument.h new file mode 100644 index 0000000..9c06367 --- /dev/null +++ b/include/llvm/Argument.h @@ -0,0 +1,84 @@ +//===-- llvm/Argument.h - Definition of the Argument class ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the Argument class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ARGUMENT_H +#define LLVM_ARGUMENT_H + +#include "llvm/Value.h" +#include "llvm/Attributes.h" +#include "llvm/ADT/ilist_node.h" + +namespace llvm { + +template<typename ValueSubClass, typename ItemParentClass> + class SymbolTableListTraits; + +/// A class to represent an incoming formal argument to a Function. An argument +/// is a very simple Value. It is essentially a named (optional) type. When used +/// in the body of a function, it represents the value of the actual argument +/// the function was called with. +/// @brief LLVM Argument representation +class Argument : public Value, public ilist_node<Argument> { + Function *Parent; + + friend class SymbolTableListTraits<Argument, Function>; + void setParent(Function *parent); + +public: + /// Argument ctor - If Function argument is specified, this argument is + /// inserted at the end of the argument list for the function. + /// + explicit Argument(const Type *Ty, const std::string &Name = "", + Function *F = 0); + + inline const Function *getParent() const { return Parent; } + inline Function *getParent() { return Parent; } + + /// getArgNo - Return the index of this formal argument in its containing + /// function. For example in "void foo(int a, float b)" a is 0 and b is 1. + unsigned getArgNo() const; + + /// hasByValAttr - Return true if this argument has the byval attribute on it + /// in its containing function. + bool hasByValAttr() const; + + /// hasNoAliasAttr - Return true if this argument has the noalias attribute on + /// it in its containing function. + bool hasNoAliasAttr() const; + + /// hasNoCaptureAttr - Return true if this argument has the nocapture + /// attribute on it in its containing function. + bool hasNoCaptureAttr() const; + + /// hasSRetAttr - Return true if this argument has the sret attribute on it in + /// its containing function. 
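
// A short sketch of the verifier entry points declared above, using
// ReturnStatusAction so a broken module is reported rather than aborting.
// ModuleIsBroken is a hypothetical wrapper.

#include "llvm/Analysis/Verifier.h"
#include <string>
using namespace llvm;

static bool ModuleIsBroken(const Module &M, std::string &Why) {
  return verifyModule(M, ReturnStatusAction, &Why);   // true means errors
}
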
+ bool hasStructRetAttr() const; + + /// addAttr - Add a Attribute to an argument + void addAttr(Attributes); + + /// removeAttr - Remove a Attribute from an argument + void removeAttr(Attributes); + + /// classof - Methods for support type inquiry through isa, cast, and + /// dyn_cast: + /// + static inline bool classof(const Argument *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == ArgumentVal; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Assembly/AsmAnnotationWriter.h b/include/llvm/Assembly/AsmAnnotationWriter.h new file mode 100644 index 0000000..6c3ddaf --- /dev/null +++ b/include/llvm/Assembly/AsmAnnotationWriter.h @@ -0,0 +1,53 @@ +//===-- AsmAnnotationWriter.h - Itf for annotation .ll files - --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Clients of the assembly writer can use this interface to add their own +// special-purpose annotations to LLVM assembly language printouts. Note that +// the assembly parser won't be able to parse these, in general, so +// implementations are advised to print stuff as LLVM comments. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ASSEMBLY_ASMANNOTATIONWRITER_H +#define LLVM_ASSEMBLY_ASMANNOTATIONWRITER_H + +namespace llvm { + +class Function; +class BasicBlock; +class Instruction; +class raw_ostream; + +class AssemblyAnnotationWriter { +public: + + virtual ~AssemblyAnnotationWriter(); + + // emitFunctionAnnot - This may be implemented to emit a string right before + // the start of a function. + virtual void emitFunctionAnnot(const Function *F, raw_ostream &OS) {} + + // emitBasicBlockStartAnnot - This may be implemented to emit a string right + // after the basic block label, but before the first instruction in the block. + virtual void emitBasicBlockStartAnnot(const BasicBlock *BB, raw_ostream &OS){ + } + + // emitBasicBlockEndAnnot - This may be implemented to emit a string right + // after the basic block. + virtual void emitBasicBlockEndAnnot(const BasicBlock *BB, raw_ostream &OS){ + } + + // emitInstructionAnnot - This may be implemented to emit a string right + // before an instruction is emitted. + virtual void emitInstructionAnnot(const Instruction *I, raw_ostream &OS) {} +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Assembly/Parser.h b/include/llvm/Assembly/Parser.h new file mode 100644 index 0000000..e4a38e4 --- /dev/null +++ b/include/llvm/Assembly/Parser.h @@ -0,0 +1,101 @@ +//===-- llvm/Assembly/Parser.h - Parser for VM assembly files ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// These classes are implemented by the lib/AsmParser library. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ASSEMBLY_PARSER_H +#define LLVM_ASSEMBLY_PARSER_H + +#include <string> + +namespace llvm { + +class Module; +class ParseError; +class raw_ostream; + +/// This function is the main interface to the LLVM Assembly Parser. It parses +/// an ASCII file that (presumably) contains LLVM Assembly code. 
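
// A sketch of a custom AssemblyAnnotationWriter (declared above) that adds
// an LLVM comment before every instruction that is printed. NoteWriter is a
// hypothetical name; an instance would be handed to the assembly printing
// routines that accept an AssemblyAnnotationWriter.

#include "llvm/Assembly/AsmAnnotationWriter.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

struct NoteWriter : public AssemblyAnnotationWriter {
  virtual void emitInstructionAnnot(const Instruction *I, raw_ostream &OS) {
    OS << "  ; next instruction follows\n";   // printed as an IR comment
  }
};
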
It returns a +/// Module (intermediate representation) with the corresponding features. Note +/// that this does not verify that the generated Module is valid, so you should +/// run the verifier after parsing the file to check that it is okay. +/// @brief Parse LLVM Assembly from a file +Module *ParseAssemblyFile( + const std::string &Filename, ///< The name of the file to parse + ParseError &Error ///< If not null, an object to return errors in. +); + +/// The function is a secondary interface to the LLVM Assembly Parser. It parses +/// an ASCII string that (presumably) contains LLVM Assembly code. It returns a +/// Module (intermediate representation) with the corresponding features. Note +/// that this does not verify that the generated Module is valid, so you should +/// run the verifier after parsing the file to check that it is okay. +/// @brief Parse LLVM Assembly from a string +Module *ParseAssemblyString( + const char *AsmString, ///< The string containing assembly + Module *M, ///< A module to add the assembly too. + ParseError &Error ///< If not null, an object to return errors in. +); + +//===------------------------------------------------------------------------=== +// Helper Classes +//===------------------------------------------------------------------------=== + +/// An instance of this class can be passed to ParseAssemblyFile or +/// ParseAssemblyString functions in order to capture error information from +/// the parser. It provides a standard way to print out the error message +/// including the file name and line number where the error occurred. +/// @brief An LLVM Assembly Parsing Error Object +class ParseError { +public: + ParseError() : Filename("unknown"), Message("none"), LineNo(0), ColumnNo(0) {} + ParseError(const ParseError &E); + + void setFilename(const std::string &F) { Filename = F; } + + inline const std::string &getRawMessage() const { // Just the raw message. + return Message; + } + + inline const std::string &getFilename() const { + return Filename; + } + + void setError(const std::string &message, int lineNo = -1, int ColNo = -1, + const std::string &FileContents = "") { + Message = message; + LineNo = lineNo; ColumnNo = ColNo; + LineContents = FileContents; + } + + // getErrorLocation - Return the line and column number of the error in the + // input source file. The source filename can be derived from the + // ParserOptions in effect. If positional information is not applicable, + // these will return a value of -1. + // + inline void getErrorLocation(int &Line, int &Column) const { + Line = LineNo; Column = ColumnNo; + } + + void PrintError(const char *ProgName, raw_ostream &S); + +private : + std::string Filename; + std::string Message; + int LineNo, ColumnNo; // -1 if not relevant + std::string LineContents; + + void operator=(const ParseError &E); // DO NOT IMPLEMENT +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Assembly/PrintModulePass.h b/include/llvm/Assembly/PrintModulePass.h new file mode 100644 index 0000000..fb4f6a7 --- /dev/null +++ b/include/llvm/Assembly/PrintModulePass.h @@ -0,0 +1,40 @@ +//===- llvm/Assembly/PrintModulePass.h - Printing Pass ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines two passes to print out a module. 
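
// A minimal sketch of parsing a .ll file via ParseAssemblyFile above; the
// helper name is hypothetical. Note that the resulting Module is unverified.

#include "llvm/Assembly/Parser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static Module *LoadAssembly(const std::string &Path) {
  ParseError Err;
  Module *M = ParseAssemblyFile(Path, Err);
  if (M == 0)
    Err.PrintError("example", errs());   // reports file, line and column
  return M;                              // run the verifier before trusting M
}
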
The PrintModulePass pass +// simply prints out the entire module when it is executed. The +// PrintFunctionPass class is designed to be pipelined with other +// FunctionPass's, and prints out the functions of the module as they are +// processed. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ASSEMBLY_PRINTMODULEPASS_H +#define LLVM_ASSEMBLY_PRINTMODULEPASS_H + +#include <string> + +namespace llvm { + class FunctionPass; + class ModulePass; + class raw_ostream; + + /// createPrintModulePass - Create and return a pass that writes the + /// module to the specified raw_ostream. + ModulePass *createPrintModulePass(raw_ostream *OS, bool DeleteStream=false); + + /// createPrintFunctionPass - Create and return a pass that prints + /// functions to the specified raw_ostream as they are processed. + FunctionPass *createPrintFunctionPass(const std::string &Banner, + raw_ostream *OS, + bool DeleteStream=false); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Assembly/Writer.h b/include/llvm/Assembly/Writer.h new file mode 100644 index 0000000..5e5fe15 --- /dev/null +++ b/include/llvm/Assembly/Writer.h @@ -0,0 +1,81 @@ +//===-- llvm/Assembly/Writer.h - Printer for LLVM assembly files --*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This functionality is implemented by lib/VMCore/AsmWriter.cpp. +// This library is used to print LLVM assembly language files to an iostream. It +// can print LLVM code at a variety of granularities, including Modules, +// BasicBlocks, and Instructions. This makes it useful for debugging. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ASSEMBLY_WRITER_H +#define LLVM_ASSEMBLY_WRITER_H + +#include <iosfwd> +#include <string> + +namespace llvm { + +class Type; +class Module; +class Value; +class raw_ostream; +template <typename T> class SmallVectorImpl; + +/// TypePrinting - Type printing machinery. +class TypePrinting { + void *TypeNames; // A map to remember type names. + TypePrinting(const TypePrinting &); // DO NOT IMPLEMENT + void operator=(const TypePrinting&); // DO NOT IMPLEMENT +public: + TypePrinting(); + ~TypePrinting(); + + void clear(); + + void print(const Type *Ty, raw_ostream &OS, bool IgnoreTopLevelName = false); + + void printAtLeastOneLevel(const Type *Ty, raw_ostream &OS) { + print(Ty, OS, true); + } + + /// hasTypeName - Return true if the type has a name in TypeNames, false + /// otherwise. + bool hasTypeName(const Type *Ty) const; + + /// addTypeName - Add a name for the specified type if it doesn't already have + /// one. This name will be printed instead of the structural version of the + /// type in order to make the output more concise. + void addTypeName(const Type *Ty, const std::string &N); + +private: + void CalcTypeName(const Type *Ty, SmallVectorImpl<const Type *> &TypeStack, + raw_ostream &OS, bool IgnoreTopLevelName = false); +}; + +// WriteTypeSymbolic - This attempts to write the specified type as a symbolic +// type, if there is an entry in the Module's symbol table for the specified +// type or one of its component types. +// +void WriteTypeSymbolic(raw_ostream &, const Type *, const Module *M); + +// WriteAsOperand - Write the name of the specified value out to the specified +// ostream. 
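
// A sketch of scheduling the printing passes declared above. The PassManager
// usage is the usual one from llvm/PassManager.h; DumpModule is hypothetical.

#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void DumpModule(Module &M) {
  PassManager PM;
  PM.add(createPrintModulePass(&outs()));   // DeleteStream defaults to false
  PM.run(M);
}
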
This can be useful when you just want to print int %reg126, not the +// whole instruction that generated it. If you specify a Module for context, +// then even constants get pretty-printed; for example, the type of a null +// pointer is printed symbolically. +// +void WriteAsOperand(std::ostream &, const Value *, bool PrintTy = true, + const Module *Context = 0); +void WriteAsOperand(raw_ostream &, const Value *, bool PrintTy = true, + const Module *Context = 0); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Attributes.h b/include/llvm/Attributes.h new file mode 100644 index 0000000..972dbfa --- /dev/null +++ b/include/llvm/Attributes.h @@ -0,0 +1,250 @@ +//===-- llvm/Attributes.h - Container for Attributes ---*---------- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the simple types necessary to represent the +// attributes associated with functions and their calls. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ATTRIBUTES_H +#define LLVM_ATTRIBUTES_H + +#include "llvm/Support/MathExtras.h" +#include <cassert> +#include <string> + +namespace llvm { +class Type; + +/// Attributes - A bitset of attributes. +typedef unsigned Attributes; + +namespace Attribute { + +/// Function parameters and results can have attributes to indicate how they +/// should be treated by optimizations and code generation. This enumeration +/// lists the attributes that can be associated with parameters, function +/// results or the function itself. +/// @brief Function attributes. + +const Attributes None = 0; ///< No attributes have been set +const Attributes ZExt = 1<<0; ///< Zero extended before/after call +const Attributes SExt = 1<<1; ///< Sign extended before/after call +const Attributes NoReturn = 1<<2; ///< Mark the function as not returning +const Attributes InReg = 1<<3; ///< Force argument to be passed in register +const Attributes StructRet = 1<<4; ///< Hidden pointer to structure to return +const Attributes NoUnwind = 1<<5; ///< Function doesn't unwind stack +const Attributes NoAlias = 1<<6; ///< Considered to not alias after call +const Attributes ByVal = 1<<7; ///< Pass structure by value +const Attributes Nest = 1<<8; ///< Nested function static chain +const Attributes ReadNone = 1<<9; ///< Function does not access memory +const Attributes ReadOnly = 1<<10; ///< Function only reads from memory +const Attributes NoInline = 1<<11; ///< inline=never +const Attributes AlwaysInline = 1<<12; ///< inline=always +const Attributes OptimizeForSize = 1<<13; ///< opt_size +const Attributes StackProtect = 1<<14; ///< Stack protection. +const Attributes StackProtectReq = 1<<15; ///< Stack protection required. +const Attributes Alignment = 31<<16; ///< Alignment of parameter (5 bits) + // stored as log2 of alignment with +1 bias + // 0 means unaligned different from align 1 +const Attributes NoCapture = 1<<21; ///< Function creates no aliases of pointer + +/// @brief Attributes that only apply to function parameters. +const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture; + +/// @brief Attributes that only apply to function. 
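
// A short sketch of WriteAsOperand from the header above: print a value
// reference such as "i32 %reg126" without its whole defining instruction.
// PrintOperand is a hypothetical helper.

#include "llvm/Assembly/Writer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void PrintOperand(const Value *V, const Module *Context) {
  WriteAsOperand(errs(), V, /*PrintTy=*/true, Context);
  errs() << "\n";
}
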
+const Attributes FunctionOnly = NoReturn | NoUnwind | ReadNone | ReadOnly | + NoInline | AlwaysInline | OptimizeForSize | StackProtect | StackProtectReq; + +/// @brief Parameter attributes that do not apply to vararg call arguments. +const Attributes VarArgsIncompatible = StructRet; + +/// @brief Attributes that are mutually incompatible. +const Attributes MutuallyIncompatible[4] = { + ByVal | InReg | Nest | StructRet, + ZExt | SExt, + ReadNone | ReadOnly, + NoInline | AlwaysInline +}; + +/// @brief Which attributes cannot be applied to a type. +Attributes typeIncompatible(const Type *Ty); + +/// This turns an int alignment (a power of 2, normally) into the +/// form used internally in Attributes. +inline Attributes constructAlignmentFromInt(unsigned i) { + // Default alignment, allow the target to define how to align it. + if (i == 0) + return 0; + + assert(isPowerOf2_32(i) && "Alignment must be a power of two."); + assert(i <= 0x40000000 && "Alignment too large."); + return (Log2_32(i)+1) << 16; +} + +/// This returns the alignment field of an attribute as a byte alignment value. +inline unsigned getAlignmentFromAttrs(Attributes A) { + Attributes Align = A & Attribute::Alignment; + if (Align == 0) + return 0; + + return 1U << ((Align >> 16) - 1); +} + + +/// The set of Attributes set in Attributes is converted to a +/// string of equivalent mnemonics. This is, presumably, for writing out +/// the mnemonics for the assembly writer. +/// @brief Convert attribute bits to text +std::string getAsString(Attributes Attrs); +} // end namespace Attribute + +/// This is just a pair of values to associate a set of attributes +/// with an index. +struct AttributeWithIndex { + Attributes Attrs; ///< The attributes that are set, or'd together. + unsigned Index; ///< Index of the parameter for which the attributes apply. + ///< Index 0 is used for return value attributes. + ///< Index ~0U is used for function attributes. + + static AttributeWithIndex get(unsigned Idx, Attributes Attrs) { + AttributeWithIndex P; + P.Index = Idx; + P.Attrs = Attrs; + return P; + } +}; + +//===----------------------------------------------------------------------===// +// AttrListPtr Smart Pointer +//===----------------------------------------------------------------------===// + +class AttributeListImpl; + +/// AttrListPtr - This class manages the ref count for the opaque +/// AttributeListImpl object and provides accessors for it. +class AttrListPtr { + /// AttrList - The attributes that we are managing. This can be null + /// to represent the empty attributes list. + AttributeListImpl *AttrList; +public: + AttrListPtr() : AttrList(0) {} + AttrListPtr(const AttrListPtr &P); + const AttrListPtr &operator=(const AttrListPtr &RHS); + ~AttrListPtr(); + + //===--------------------------------------------------------------------===// + // Attribute List Construction and Mutation + //===--------------------------------------------------------------------===// + + /// get - Return a Attributes list with the specified parameter in it. + static AttrListPtr get(const AttributeWithIndex *Attr, unsigned NumAttrs); + + /// get - Return a Attribute list with the parameters specified by the + /// consecutive random access iterator range. + template <typename Iter> + static AttrListPtr get(const Iter &I, const Iter &E) { + if (I == E) return AttrListPtr(); // Empty list. + return get(&*I, static_cast<unsigned>(E-I)); + } + + /// addAttr - Add the specified attribute at the specified index to this + /// attribute list. 
Since attribute lists are immutable, this + /// returns the new list. + AttrListPtr addAttr(unsigned Idx, Attributes Attrs) const; + + /// removeAttr - Remove the specified attribute at the specified index from + /// this attribute list. Since attribute lists are immutable, this + /// returns the new list. + AttrListPtr removeAttr(unsigned Idx, Attributes Attrs) const; + + //===--------------------------------------------------------------------===// + // Attribute List Accessors + //===--------------------------------------------------------------------===// + /// getParamAttributes - The attributes for the specified index are + /// returned. + Attributes getParamAttributes(unsigned Idx) const { + assert (Idx && Idx != ~0U && "Invalid parameter index!"); + return getAttributes(Idx); + } + + /// getRetAttributes - The attributes for the ret value are + /// returned. + Attributes getRetAttributes() const { + return getAttributes(0); + } + + /// getFnAttributes - The function attributes are returned. + Attributes getFnAttributes() const { + return getAttributes(~0); + } + + /// paramHasAttr - Return true if the specified parameter index has the + /// specified attribute set. + bool paramHasAttr(unsigned Idx, Attributes Attr) const { + return getAttributes(Idx) & Attr; + } + + /// getParamAlignment - Return the alignment for the specified function + /// parameter. + unsigned getParamAlignment(unsigned Idx) const { + return Attribute::getAlignmentFromAttrs(getAttributes(Idx)); + } + + /// hasAttrSomewhere - Return true if the specified attribute is set for at + /// least one parameter or for the return value. + bool hasAttrSomewhere(Attributes Attr) const; + + /// operator==/!= - Provide equality predicates. + bool operator==(const AttrListPtr &RHS) const { return AttrList == RHS.AttrList; } + bool operator!=(const AttrListPtr &RHS) const { return AttrList != RHS.AttrList; } + + void dump() const; + + //===--------------------------------------------------------------------===// + // Attribute List Introspection + //===--------------------------------------------------------------------===// + + /// getRawPointer - Return a raw pointer that uniquely identifies this + /// attribute list. + void *getRawPointer() const { + return AttrList; + } + + // Attributes are stored as a dense set of slots, where there is one + // slot for each argument that has an attribute. This allows walking over the + // dense set instead of walking the sparse list of attributes. + + /// isEmpty - Return true if there are no attributes. + /// + bool isEmpty() const { + return AttrList == 0; + } + + /// getNumSlots - Return the number of slots used in this attribute list. + /// This is the number of arguments that have an attribute set on them + /// (including the function itself). + unsigned getNumSlots() const; + + /// getSlot - Return the AttributeWithIndex at the specified slot. This + /// holds a index number plus a set of attributes. + const AttributeWithIndex &getSlot(unsigned Slot) const; + +private: + explicit AttrListPtr(AttributeListImpl *L); + + /// getAttributes - The attributes for the specified index are + /// returned. Attributes for the result are denoted with Idx = 0. 
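
// A sketch of building an attribute list with the API above: parameter 1
// gets 'byval' plus 16-byte alignment, and the function slot (index ~0U)
// gets 'nounwind'. MakeAttrs is a hypothetical helper.

#include "llvm/Attributes.h"
using namespace llvm;

static AttrListPtr MakeAttrs() {
  AttributeWithIndex AWI[2];
  AWI[0] = AttributeWithIndex::get(1, Attribute::ByVal |
                                   Attribute::constructAlignmentFromInt(16));
  AWI[1] = AttributeWithIndex::get(~0U, Attribute::NoUnwind);
  AttrListPtr PAL = AttrListPtr::get(AWI, 2);
  // PAL.paramHasAttr(1, Attribute::ByVal) is true;
  // PAL.getParamAlignment(1) yields 16.
  return PAL;
}
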
+ Attributes getAttributes(unsigned Idx) const; + +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/AutoUpgrade.h b/include/llvm/AutoUpgrade.h new file mode 100644 index 0000000..f61bd1a --- /dev/null +++ b/include/llvm/AutoUpgrade.h @@ -0,0 +1,39 @@ +//===-- llvm/AutoUpgrade.h - AutoUpgrade Helpers ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// These functions are implemented by lib/VMCore/AutoUpgrade.cpp. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AUTOUPGRADE_H +#define LLVM_AUTOUPGRADE_H + +namespace llvm { + class Function; + class CallInst; + + /// This is a more granular function that simply checks an intrinsic function + /// for upgrading, and returns true if it requires upgrading. It may return + /// null in NewFn if the all calls to the original intrinsic function + /// should be transformed to non-function-call instructions. + bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn); + + /// This is the complement to the above, replacing a specific call to an + /// intrinsic function with a call to the specified new function. + void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn); + + /// This is an auto-upgrade hook for any old intrinsic function syntaxes + /// which need to have both the function updated as well as all calls updated + /// to the new function. This should only be run in a post-processing fashion + /// so that it can update all calls to the old function. + void UpgradeCallsToIntrinsic(Function* F); + +} // End llvm namespace + +#endif diff --git a/include/llvm/BasicBlock.h b/include/llvm/BasicBlock.h new file mode 100644 index 0000000..072f615 --- /dev/null +++ b/include/llvm/BasicBlock.h @@ -0,0 +1,235 @@ +//===-- llvm/BasicBlock.h - Represent a basic block in the VM ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the BasicBlock class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BASICBLOCK_H +#define LLVM_BASICBLOCK_H + +#include "llvm/Instruction.h" +#include "llvm/SymbolTableListTraits.h" +#include "llvm/ADT/ilist.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class TerminatorInst; + +template<> struct ilist_traits<Instruction> + : public SymbolTableListTraits<Instruction, BasicBlock> { + // createSentinel is used to get hold of a node that marks the end of + // the list... + // The sentinel is relative to this instance, so we use a non-static + // method. + Instruction *createSentinel() const { + // since i(p)lists always publicly derive from the corresponding + // traits, placing a data member in this class will augment i(p)list. + // But since the NodeTy is expected to publicly derive from + // ilist_node<NodeTy>, there is a legal viable downcast from it + // to NodeTy. We use this trick to superpose i(p)list with a "ghostly" + // NodeTy, which becomes the sentinel. Dereferencing the sentinel is + // forbidden (save the ilist_node<NodeTy>) so no one will ever notice + // the superposition. 
+ return static_cast<Instruction*>(&Sentinel); + } + static void destroySentinel(Instruction*) {} + + Instruction *provideInitialHead() const { return createSentinel(); } + Instruction *ensureHead(Instruction*) const { return createSentinel(); } + static void noteHead(Instruction*, Instruction*) {} +private: + mutable ilist_node<Instruction> Sentinel; +}; + +/// This represents a single basic block in LLVM. A basic block is simply a +/// container of instructions that execute sequentially. Basic blocks are Values +/// because they are referenced by instructions such as branches and switch +/// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block +/// represents a label to which a branch can jump. +/// +/// A well formed basic block is formed of a list of non-terminating +/// instructions followed by a single TerminatorInst instruction. +/// TerminatorInst's may not occur in the middle of basic blocks, and must +/// terminate the blocks. The BasicBlock class allows malformed basic blocks to +/// occur because it may be useful in the intermediate stage of constructing or +/// modifying a program. However, the verifier will ensure that basic blocks +/// are "well formed". +/// @brief LLVM Basic Block Representation +class BasicBlock : public Value, // Basic blocks are data objects also + public ilist_node<BasicBlock> { + +public: + typedef iplist<Instruction> InstListType; +private: + InstListType InstList; + Function *Parent; + + void setParent(Function *parent); + friend class SymbolTableListTraits<BasicBlock, Function>; + + BasicBlock(const BasicBlock &); // Do not implement + void operator=(const BasicBlock &); // Do not implement + + /// BasicBlock ctor - If the function parameter is specified, the basic block + /// is automatically inserted at either the end of the function (if + /// InsertBefore is null), or before the specified basic block. + /// + explicit BasicBlock(const std::string &Name = "", Function *Parent = 0, + BasicBlock *InsertBefore = 0); +public: + /// Instruction iterators... + typedef InstListType::iterator iterator; + typedef InstListType::const_iterator const_iterator; + + /// Create - Creates a new BasicBlock. If the Parent parameter is specified, + /// the basic block is automatically inserted at either the end of the + /// function (if InsertBefore is 0), or before the specified basic block. + static BasicBlock *Create(const std::string &Name = "", Function *Parent = 0, + BasicBlock *InsertBefore = 0) { + return new BasicBlock(Name, Parent, InsertBefore); + } + ~BasicBlock(); + + /// getParent - Return the enclosing method, or null if none + /// + const Function *getParent() const { return Parent; } + Function *getParent() { return Parent; } + + /// use_back - Specialize the methods defined in Value, as we know that an + /// BasicBlock can only be used by Instructions (specifically PHI nodes and + /// terminators). + Instruction *use_back() { return cast<Instruction>(*use_begin());} + const Instruction *use_back() const { return cast<Instruction>(*use_begin());} + + /// getTerminator() - If this is a well formed basic block, then this returns + /// a pointer to the terminator instruction. If it is not, then you get a + /// null pointer back. + /// + TerminatorInst *getTerminator(); + const TerminatorInst *getTerminator() const; + + /// Returns a pointer to the first instructon in this block that is not a + /// PHINode instruction. 
When adding instruction to the beginning of the + /// basic block, they should be added before the returned value, not before + /// the first instruction, which might be PHI. + /// Returns 0 is there's no non-PHI instruction. + Instruction* getFirstNonPHI(); + const Instruction* getFirstNonPHI() const { + return const_cast<BasicBlock*>(this)->getFirstNonPHI(); + } + + /// removeFromParent - This method unlinks 'this' from the containing + /// function, but does not delete it. + /// + void removeFromParent(); + + /// eraseFromParent - This method unlinks 'this' from the containing function + /// and deletes it. + /// + void eraseFromParent(); + + /// moveBefore - Unlink this basic block from its current function and + /// insert it into the function that MovePos lives in, right before MovePos. + void moveBefore(BasicBlock *MovePos); + + /// moveAfter - Unlink this basic block from its current function and + /// insert it into the function that MovePos lives in, right after MovePos. + void moveAfter(BasicBlock *MovePos); + + + /// getSinglePredecessor - If this basic block has a single predecessor block, + /// return the block, otherwise return a null pointer. + BasicBlock *getSinglePredecessor(); + const BasicBlock *getSinglePredecessor() const { + return const_cast<BasicBlock*>(this)->getSinglePredecessor(); + } + + /// getUniquePredecessor - If this basic block has a unique predecessor block, + /// return the block, otherwise return a null pointer. + /// Note that unique predecessor doesn't mean single edge, there can be + /// multiple edges from the unique predecessor to this block (for example + /// a switch statement with multiple cases having the same destination). + BasicBlock *getUniquePredecessor(); + const BasicBlock *getUniquePredecessor() const { + return const_cast<BasicBlock*>(this)->getUniquePredecessor(); + } + + //===--------------------------------------------------------------------===// + /// Instruction iterator methods + /// + inline iterator begin() { return InstList.begin(); } + inline const_iterator begin() const { return InstList.begin(); } + inline iterator end () { return InstList.end(); } + inline const_iterator end () const { return InstList.end(); } + + inline size_t size() const { return InstList.size(); } + inline bool empty() const { return InstList.empty(); } + inline const Instruction &front() const { return InstList.front(); } + inline Instruction &front() { return InstList.front(); } + inline const Instruction &back() const { return InstList.back(); } + inline Instruction &back() { return InstList.back(); } + + /// getInstList() - Return the underlying instruction list container. You + /// need to access it directly if you want to modify it currently. + /// + const InstListType &getInstList() const { return InstList; } + InstListType &getInstList() { return InstList; } + + /// getSublistAccess() - returns pointer to member of instruction list + static iplist<Instruction> BasicBlock::*getSublistAccess(Instruction*) { + return &BasicBlock::InstList; + } + + /// getValueSymbolTable() - returns pointer to symbol table (if any) + ValueSymbolTable *getValueSymbolTable(); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const BasicBlock *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == Value::BasicBlockVal; + } + + /// dropAllReferences() - This function causes all the subinstructions to "let + /// go" of all references that they are maintaining. 
This allows one to + /// 'delete' a whole class at a time, even though there may be circular + /// references... first all references are dropped, and all use counts go to + /// zero. Then everything is delete'd for real. Note that no operations are + /// valid on an object that has "dropped all references", except operator + /// delete. + /// + void dropAllReferences(); + + /// removePredecessor - This method is used to notify a BasicBlock that the + /// specified Predecessor of the block is no longer able to reach it. This is + /// actually not used to update the Predecessor list, but is actually used to + /// update the PHI nodes that reside in the block. Note that this should be + /// called while the predecessor still refers to this block. + /// + void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs = false); + + /// splitBasicBlock - This splits a basic block into two at the specified + /// instruction. Note that all instructions BEFORE the specified iterator + /// stay as part of the original basic block, an unconditional branch is added + /// to the original BB, and the rest of the instructions in the BB are moved + /// to the new BB, including the old terminator. The newly formed BasicBlock + /// is returned. This function invalidates the specified iterator. + /// + /// Note that this only works on well formed basic blocks (must have a + /// terminator), and 'I' must not be the end of instruction list (which would + /// cause a degenerate basic block to be formed, having a terminator inside of + /// the basic block). + /// + BasicBlock *splitBasicBlock(iterator I, const std::string &BBName = ""); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/Archive.h b/include/llvm/Bitcode/Archive.h new file mode 100644 index 0000000..a3631ac --- /dev/null +++ b/include/llvm/Bitcode/Archive.h @@ -0,0 +1,545 @@ +//===-- llvm/Bitcode/Archive.h - LLVM Bitcode Archive -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file declares the Archive and ArchiveMember classes that provide +// manipulation of LLVM Archive files. The implementation is provided by the +// lib/Bitcode/Archive library. This library is used to read and write +// archive (*.a) files that contain LLVM bitcode files (or others). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_ARCHIVE_H +#define LLVM_BITCODE_ARCHIVE_H + +#include "llvm/ADT/ilist.h" +#include "llvm/ADT/ilist_node.h" +#include "llvm/System/Path.h" +#include <map> +#include <set> +#include <fstream> + +namespace llvm { + class MemoryBuffer; + +// Forward declare classes +class ModuleProvider; // From VMCore +class Module; // From VMCore +class Archive; // Declared below +class ArchiveMemberHeader; // Internal implementation class + +/// This class is the main class manipulated by users of the Archive class. It +/// holds information about one member of the Archive. It is also the element +/// stored by the Archive's ilist, the Archive's main abstraction. Because of +/// the special requirements of archive files, users are not permitted to +/// construct ArchiveMember instances. You should obtain them from the methods +/// of the Archive class instead. +/// @brief This class represents a single archive member. 
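
// A short sketch of the BasicBlock API above: append a fresh block to a
// function and split an existing, well formed block right after its leading
// PHI nodes. SplitAfterPHIs is a hypothetical helper.

#include "llvm/BasicBlock.h"
#include "llvm/Function.h"
using namespace llvm;

static BasicBlock *SplitAfterPHIs(Function &F, BasicBlock *BB) {
  BasicBlock *Fresh = BasicBlock::Create("fresh", &F);   // appended to F
  (void)Fresh;
  // getFirstNonPHI() never returns null in a well formed block, since the
  // terminator is not a PHI. Everything from that point moves to the new
  // block, and BB gets an unconditional branch to it.
  return BB->splitBasicBlock(BB->getFirstNonPHI(), "tail");
}
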
+class ArchiveMember : public ilist_node<ArchiveMember> { + /// @name Types + /// @{ + public: + /// These flags are used internally by the archive member to specify various + /// characteristics of the member. The various "is" methods below provide + /// access to the flags. The flags are not user settable. + enum Flags { + CompressedFlag = 1, ///< Member is a normal compressed file + SVR4SymbolTableFlag = 2, ///< Member is a SVR4 symbol table + BSD4SymbolTableFlag = 4, ///< Member is a BSD4 symbol table + LLVMSymbolTableFlag = 8, ///< Member is an LLVM symbol table + BitcodeFlag = 16, ///< Member is bitcode + HasPathFlag = 64, ///< Member has a full or partial path + HasLongFilenameFlag = 128, ///< Member uses the long filename syntax + StringTableFlag = 256 ///< Member is an ar(1) format string table + }; + + /// @} + /// @name Accessors + /// @{ + public: + /// @returns the parent Archive instance + /// @brief Get the archive associated with this member + Archive* getArchive() const { return parent; } + + /// @returns the path to the Archive's file + /// @brief Get the path to the archive member + const sys::Path& getPath() const { return path; } + + /// The "user" is the owner of the file per Unix security. This may not + /// have any applicability on non-Unix systems but is a required component + /// of the "ar" file format. + /// @brief Get the user associated with this archive member. + unsigned getUser() const { return info.getUser(); } + + /// The "group" is the owning group of the file per Unix security. This + /// may not have any applicability on non-Unix systems but is a required + /// component of the "ar" file format. + /// @brief Get the group associated with this archive member. + unsigned getGroup() const { return info.getGroup(); } + + /// The "mode" specifies the access permissions for the file per Unix + /// security. This may not have any applicabiity on non-Unix systems but is + /// a required component of the "ar" file format. + /// @brief Get the permission mode associated with this archive member. + unsigned getMode() const { return info.getMode(); } + + /// This method returns the time at which the archive member was last + /// modified when it was not in the archive. + /// @brief Get the time of last modification of the archive member. + sys::TimeValue getModTime() const { return info.getTimestamp(); } + + /// @returns the size of the archive member in bytes. + /// @brief Get the size of the archive member. + uint64_t getSize() const { return info.getSize(); } + + /// This method returns the total size of the archive member as it + /// appears on disk. This includes the file content, the header, the + /// long file name if any, and the padding. + /// @brief Get total on-disk member size. + unsigned getMemberSize() const; + + /// This method will return a pointer to the in-memory content of the + /// archive member, if it is available. If the data has not been loaded + /// into memory, the return value will be null. + /// @returns a pointer to the member's data. + /// @brief Get the data content of the archive member + const void* getData() const { return data; } + + /// This method determines if the member is a regular compressed file. + /// @returns true iff the archive member is a compressed regular file. + /// @brief Determine if the member is a compressed regular file. 
+ bool isCompressed() const { return flags&CompressedFlag; } + + /// @returns true iff the member is a SVR4 (non-LLVM) symbol table + /// @brief Determine if this member is a SVR4 symbol table. + bool isSVR4SymbolTable() const { return flags&SVR4SymbolTableFlag; } + + /// @returns true iff the member is a BSD4.4 (non-LLVM) symbol table + /// @brief Determine if this member is a BSD4.4 symbol table. + bool isBSD4SymbolTable() const { return flags&BSD4SymbolTableFlag; } + + /// @returns true iff the archive member is the LLVM symbol table + /// @brief Determine if this member is the LLVM symbol table. + bool isLLVMSymbolTable() const { return flags&LLVMSymbolTableFlag; } + + /// @returns true iff the archive member is the ar(1) string table + /// @brief Determine if this member is the ar(1) string table. + bool isStringTable() const { return flags&StringTableFlag; } + + /// @returns true iff the archive member is a bitcode file. + /// @brief Determine if this member is a bitcode file. + bool isBitcode() const { return flags&BitcodeFlag; } + + /// @returns true iff the file name contains a path (directory) component. + /// @brief Determine if the member has a path + bool hasPath() const { return flags&HasPathFlag; } + + /// Long filenames are an artifact of the ar(1) file format which allows + /// up to sixteen characters in its header and doesn't allow a path + /// separator character (/). To avoid this, a "long format" member name is + /// allowed that doesn't have this restriction. This method determines if + /// that "long format" is used for this member. + /// @returns true iff the file name uses the long form + /// @brief Determin if the member has a long file name + bool hasLongFilename() const { return flags&HasLongFilenameFlag; } + + /// This method returns the status info (like Unix stat(2)) for the archive + /// member. The status info provides the file's size, permissions, and + /// modification time. The contents of the Path::StatusInfo structure, other + /// than the size and modification time, may not have utility on non-Unix + /// systems. + /// @returns the status info for the archive member + /// @brief Obtain the status info for the archive member + const sys::FileStatus &getFileStatus() const { return info; } + + /// This method causes the archive member to be replaced with the contents + /// of the file specified by \p File. The contents of \p this will be + /// updated to reflect the new data from \p File. The \p File must exist and + /// be readable on entry to this method. + /// @returns true if an error occurred, false otherwise + /// @brief Replace contents of archive member with a new file. + bool replaceWith(const sys::Path &aFile, std::string* ErrMsg); + + /// @} + /// @name Data + /// @{ + private: + Archive* parent; ///< Pointer to parent archive + sys::PathWithStatus path; ///< Path of file containing the member + sys::FileStatus info; ///< Status info (size,mode,date) + unsigned flags; ///< Flags about the archive member + const void* data; ///< Data for the member + + /// @} + /// @name Constructors + /// @{ + public: + /// The default constructor is only used by the Archive's iplist when it + /// constructs the list's sentry node. + ArchiveMember(); + + private: + /// Used internally by the Archive class to construct an ArchiveMember. + /// The contents of the ArchiveMember are filled out by the Archive class. 
+ explicit ArchiveMember(Archive *PAR); + + // So Archive can construct an ArchiveMember + friend class llvm::Archive; + /// @} +}; + +/// This class defines the interface to LLVM Archive files. The Archive class +/// presents the archive file as an ilist of ArchiveMember objects. The members +/// can be rearranged in any fashion either by directly editing the ilist or by +/// using editing methods on the Archive class (recommended). The Archive +/// class also provides several ways of accessing the archive file for various +/// purposes such as editing and linking. Full symbol table support is provided +/// for loading only those files that resolve symbols. Note that read +/// performance of this library is _crucial_ for performance of JIT type +/// applications and the linkers. Consequently, the implementation of the class +/// is optimized for reading. +class Archive { + + /// @name Types + /// @{ + public: + /// This is the ilist type over which users may iterate to examine + /// the contents of the archive + /// @brief The ilist type of ArchiveMembers that Archive contains. + typedef iplist<ArchiveMember> MembersList; + + /// @brief Forward mutable iterator over ArchiveMember + typedef MembersList::iterator iterator; + + /// @brief Forward immutable iterator over ArchiveMember + typedef MembersList::const_iterator const_iterator; + + /// @brief Reverse mutable iterator over ArchiveMember + typedef std::reverse_iterator<iterator> reverse_iterator; + + /// @brief Reverse immutable iterator over ArchiveMember + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + /// @brief The in-memory version of the symbol table + typedef std::map<std::string,unsigned> SymTabType; + + /// @} + /// @name ilist accessor methods + /// @{ + public: + inline iterator begin() { return members.begin(); } + inline const_iterator begin() const { return members.begin(); } + inline iterator end () { return members.end(); } + inline const_iterator end () const { return members.end(); } + + inline reverse_iterator rbegin() { return members.rbegin(); } + inline const_reverse_iterator rbegin() const { return members.rbegin(); } + inline reverse_iterator rend () { return members.rend(); } + inline const_reverse_iterator rend () const { return members.rend(); } + + inline size_t size() const { return members.size(); } + inline bool empty() const { return members.empty(); } + inline const ArchiveMember& front() const { return members.front(); } + inline ArchiveMember& front() { return members.front(); } + inline const ArchiveMember& back() const { return members.back(); } + inline ArchiveMember& back() { return members.back(); } + + /// @} + /// @name ilist mutator methods + /// @{ + public: + /// This method splices a \p src member from an archive (possibly \p this), + /// to a position just before the member given by \p dest in \p this. When + /// the archive is written, \p src will be written in its new location. + /// @brief Move a member to a new location + inline void splice(iterator dest, Archive& arch, iterator src) + { return members.splice(dest,arch.members,src); } + + /// This method erases a \p target member from the archive. When the + /// archive is written, it will no longer contain \p target. The associated + /// ArchiveMember is deleted. + /// @brief Erase a member. + inline iterator erase(iterator target) { return members.erase(target); } + + /// @} + /// @name Constructors + /// @{ + public: + /// Create an empty archive file and associate it with the \p Filename. 
This + /// method does not actually create the archive disk file. It creates an + /// empty Archive object. If the writeToDisk method is called, the archive + /// file \p Filename will be created at that point, with whatever content + /// the returned Archive object has at that time. + /// @returns An Archive* that represents the new archive file. + /// @brief Create an empty Archive. + static Archive* CreateEmpty( + const sys::Path& Filename ///< Name of the archive to (eventually) create. + ); + + /// Open an existing archive and load its contents in preparation for + /// editing. After this call, the member ilist is completely populated based + /// on the contents of the archive file. You should use this form of open if + /// you intend to modify the archive or traverse its contents (e.g. for + /// printing). + /// @brief Open and load an archive file + static Archive* OpenAndLoad( + const sys::Path& filePath, ///< The file path to open and load + std::string* ErrorMessage ///< An optional error string + ); + + /// This method opens an existing archive file from \p Filename and reads in + /// its symbol table without reading in any of the archive's members. This + /// reduces both I/O and cpu time in opening the archive if it is to be used + /// solely for symbol lookup (e.g. during linking). The \p Filename must + /// exist and be an archive file or an exception will be thrown. This form + /// of opening the archive is intended for read-only operations that need to + /// locate members via the symbol table for link editing. Since the archve + /// members are not read by this method, the archive will appear empty upon + /// return. If editing operations are performed on the archive, they will + /// completely replace the contents of the archive! It is recommended that + /// if this form of opening the archive is used that only the symbol table + /// lookup methods (getSymbolTable, findModuleDefiningSymbol, and + /// findModulesDefiningSymbols) be used. + /// @throws std::string if an error occurs opening the file + /// @returns an Archive* that represents the archive file. + /// @brief Open an existing archive and load its symbols. + static Archive* OpenAndLoadSymbols( + const sys::Path& Filename, ///< Name of the archive file to open + std::string* ErrorMessage=0 ///< An optional error string + ); + + /// This destructor cleans up the Archive object, releases all memory, and + /// closes files. It does nothing with the archive file on disk. If you + /// haven't used the writeToDisk method by the time the destructor is + /// called, all changes to the archive will be lost. + /// @throws std::string if an error occurs + /// @brief Destruct in-memory archive + ~Archive(); + + /// @} + /// @name Accessors + /// @{ + public: + /// @returns the path to the archive file. + /// @brief Get the archive path. + const sys::Path& getPath() { return archPath; } + + /// This method is provided so that editing methods can be invoked directly + /// on the Archive's iplist of ArchiveMember. However, it is recommended + /// that the usual STL style iterator interface be used instead. + /// @returns the iplist of ArchiveMember + /// @brief Get the iplist of the members + MembersList& getMembers() { return members; } + + /// This method allows direct query of the Archive's symbol table. The + /// symbol table is a std::map of std::string (the symbol) to unsigned (the + /// file offset). Note that for efficiency reasons, the offset stored in + /// the symbol table is not the actual offset. 
It is the offset from the + /// beginning of the first "real" file member (after the symbol table). Use + /// the getFirstFileOffset() to obtain that offset and add this value to the + /// offset in the symbol table to obtain the real file offset. Note that + /// there is purposefully no interface provided by Archive to look up + /// members by their offset. Use the findModulesDefiningSymbols and + /// findModuleDefiningSymbol methods instead. + /// @returns the Archive's symbol table. + /// @brief Get the archive's symbol table + const SymTabType& getSymbolTable() { return symTab; } + + /// This method returns the offset in the archive file to the first "real" + /// file member. Archive files, on disk, have a signature and might have a + /// symbol table that precedes the first actual file member. This method + /// allows you to determine what the size of those fields are. + /// @returns the offset to the first "real" file member in the archive. + /// @brief Get the offset to the first "real" file member in the archive. + unsigned getFirstFileOffset() { return firstFileOffset; } + + /// This method will scan the archive for bitcode modules, interpret them + /// and return a vector of the instantiated modules in \p Modules. If an + /// error occurs, this method will return true. If \p ErrMessage is not null + /// and an error occurs, \p *ErrMessage will be set to a string explaining + /// the error that occurred. + /// @returns true if an error occurred + /// @brief Instantiate all the bitcode modules located in the archive + bool getAllModules(std::vector<Module*>& Modules, std::string* ErrMessage); + + /// This accessor looks up the \p symbol in the archive's symbol table and + /// returns the associated module that defines that symbol. This method can + /// be called as many times as necessary. This is handy for linking the + /// archive into another module based on unresolved symbols. Note that the + /// ModuleProvider returned by this accessor should not be deleted by the + /// caller. It is managed internally by the Archive class. It is possible + /// that multiple calls to this accessor will return the same ModuleProvider + /// instance because the associated module defines multiple symbols. + /// @returns The ModuleProvider* found or null if the archive does not + /// contain a module that defines the \p symbol. + /// @brief Look up a module by symbol name. + ModuleProvider* findModuleDefiningSymbol( + const std::string& symbol, ///< Symbol to be sought + std::string* ErrMessage ///< Error message storage, if non-zero + ); + + /// This method is similar to findModuleDefiningSymbol but allows lookup of + /// more than one symbol at a time. If \p symbols contains a list of + /// undefined symbols in some module, then calling this method is like + /// making one complete pass through the archive to resolve symbols but is + /// more efficient than looking at the individual members. Note that on + /// exit, the symbols resolved by this method will be removed from \p + /// symbols to ensure they are not re-searched on a subsequent call. If + /// you need to retain the list of symbols, make a copy. + /// @brief Look up multiple symbols in the archive. + bool findModulesDefiningSymbols( + std::set<std::string>& symbols, ///< Symbols to be sought + std::set<ModuleProvider*>& modules, ///< The modules matching \p symbols + std::string* ErrMessage ///< Error msg storage, if non-zero + ); + + /// This method determines whether the archive is a properly formed llvm + /// bitcode archive. 
It first makes sure the symbol table has been loaded + /// and has a non-zero size. If it does, then it is an archive. If not, + /// then it tries to load all the bitcode modules of the archive. Finally, + /// it returns whether it was successfull. + /// @returns true if the archive is a proper llvm bitcode archive + /// @brief Determine whether the archive is a proper llvm bitcode archive. + bool isBitcodeArchive(); + + /// @} + /// @name Mutators + /// @{ + public: + /// This method is the only way to get the archive written to disk. It + /// creates or overwrites the file specified when \p this was created + /// or opened. The arguments provide options for writing the archive. If + /// \p CreateSymbolTable is true, the archive is scanned for bitcode files + /// and a symbol table of the externally visible function and global + /// variable names is created. If \p TruncateNames is true, the names of the + /// archive members will have their path component stripped and the file + /// name will be truncated at 15 characters. If \p Compress is specified, + /// all archive members will be compressed before being written. If + /// \p PrintSymTab is true, the symbol table will be printed to std::cout. + /// @returns true if an error occurred, \p error set to error message + /// @returns false if the writing succeeded. + /// @brief Write (possibly modified) archive contents to disk + bool writeToDisk( + bool CreateSymbolTable=false, ///< Create Symbol table + bool TruncateNames=false, ///< Truncate the filename to 15 chars + bool Compress=false, ///< Compress files + std::string* ErrMessage=0 ///< If non-null, where error msg is set + ); + + /// This method adds a new file to the archive. The \p filename is examined + /// to determine just enough information to create an ArchiveMember object + /// which is then inserted into the Archive object's ilist at the location + /// given by \p where. + /// @returns true if an error occured, false otherwise + /// @brief Add a file to the archive. + bool addFileBefore( + const sys::Path& filename, ///< The file to be added + iterator where, ///< Insertion point + std::string* ErrMsg ///< Optional error message location + ); + + /// @} + /// @name Implementation + /// @{ + protected: + /// @brief Construct an Archive for \p filename and optionally map it + /// into memory. + explicit Archive(const sys::Path& filename); + + /// @param data The symbol table data to be parsed + /// @param len The length of the symbol table data + /// @param error Set to address of a std::string to get error messages + /// @returns false on error + /// @brief Parse the symbol table at \p data. + bool parseSymbolTable(const void* data,unsigned len,std::string* error); + + /// @returns A fully populated ArchiveMember or 0 if an error occurred. + /// @brief Parse the header of a member starting at \p At + ArchiveMember* parseMemberHeader( + const char*&At, ///< The pointer to the location we're parsing + const char*End, ///< The pointer to the end of the archive + std::string* error ///< Optional error message catcher + ); + + /// @param ErrMessage Set to address of a std::string to get error messages + /// @returns false on error + /// @brief Check that the archive signature is correct + bool checkSignature(std::string* ErrMessage); + + /// @param ErrMessage Set to address of a std::string to get error messages + /// @returns false on error + /// @brief Load the entire archive. 
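  // Illustrative sketch (editorial example, not part of this header): the
  // symbol-table driven lookup described above, roughly as a linker would use
  // it. The archive path and the symbol name are placeholders.
  //
  //   std::string ErrMsg;
  //   Archive *A = Archive::OpenAndLoadSymbols(sys::Path("libfoo.a"), &ErrMsg);
  //   std::set<std::string> Undefined;
  //   Undefined.insert("main");
  //   std::set<ModuleProvider*> Providers;
  //   if (A)
  //     A->findModulesDefiningSymbols(Undefined, Providers, &ErrMsg);
  //   // On return, Providers holds a ModuleProvider for each member that
  //   // defines one of the requested symbols, and those symbols have been
  //   // removed from Undefined.
  //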
+ bool loadArchive(std::string* ErrMessage); + + /// @param ErrMessage Set to address of a std::string to get error messages + /// @returns false on error + /// @brief Load just the symbol table. + bool loadSymbolTable(std::string* ErrMessage); + + /// @brief Write the symbol table to an ofstream. + void writeSymbolTable(std::ofstream& ARFile); + + /// Writes one ArchiveMember to an ofstream. If an error occurs, returns + /// false, otherwise true. If an error occurs and error is non-null then + /// it will be set to an error message. + /// @returns false Writing member succeeded + /// @returns true Writing member failed, \p error set to error message + bool writeMember( + const ArchiveMember& member, ///< The member to be written + std::ofstream& ARFile, ///< The file to write member onto + bool CreateSymbolTable, ///< Should symbol table be created? + bool TruncateNames, ///< Should names be truncated to 11 chars? + bool ShouldCompress, ///< Should the member be compressed? + std::string* ErrMessage ///< If non-null, place were error msg is set + ); + + /// @brief Fill in an ArchiveMemberHeader from ArchiveMember. + bool fillHeader(const ArchiveMember&mbr, + ArchiveMemberHeader& hdr,int sz, bool TruncateNames) const; + + /// @brief Maps archive into memory + bool mapToMemory(std::string* ErrMsg); + + /// @brief Frees all the members and unmaps the archive file. + void cleanUpMemory(); + + /// This type is used to keep track of bitcode modules loaded from the + /// symbol table. It maps the file offset to a pair that consists of the + /// associated ArchiveMember and the ModuleProvider. + /// @brief Module mapping type + typedef std::map<unsigned,std::pair<ModuleProvider*,ArchiveMember*> > + ModuleMap; + + + /// @} + /// @name Data + /// @{ + protected: + sys::Path archPath; ///< Path to the archive file we read/write + MembersList members; ///< The ilist of ArchiveMember + MemoryBuffer *mapfile; ///< Raw Archive contents mapped into memory + const char* base; ///< Base of the memory mapped file data + SymTabType symTab; ///< The symbol table + std::string strtab; ///< The string table for long file names + unsigned symTabSize; ///< Size in bytes of symbol table + unsigned firstFileOffset; ///< Offset to first normal file. + ModuleMap modules; ///< The modules loaded via symbol lookup. + ArchiveMember* foreignST; ///< This holds the foreign symbol table. + /// @} + /// @name Hidden + /// @{ + private: + Archive(); ///< Do not implement + Archive(const Archive&); ///< Do not implement + Archive& operator=(const Archive&); ///< Do not implement + /// @} +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/BitCodes.h b/include/llvm/Bitcode/BitCodes.h new file mode 100644 index 0000000..449dc35 --- /dev/null +++ b/include/llvm/Bitcode/BitCodes.h @@ -0,0 +1,185 @@ +//===- BitCodes.h - Enum values for the bitcode format ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header Bitcode enum values. +// +// The enum values defined in this file should be considered permanent. If +// new features are added, they should have values added at the end of the +// respective lists. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_BITCODES_H +#define LLVM_BITCODE_BITCODES_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/DataTypes.h" +#include <cassert> + +namespace llvm { +namespace bitc { + enum StandardWidths { + BlockIDWidth = 8, // We use VBR-8 for block IDs. + CodeLenWidth = 4, // Codelen are VBR-4. + BlockSizeWidth = 32 // BlockSize up to 2^32 32-bit words = 16GB per block. + }; + + // The standard abbrev namespace always has a way to exit a block, enter a + // nested block, define abbrevs, and define an unabbreviated record. + enum FixedAbbrevIDs { + END_BLOCK = 0, // Must be zero to guarantee termination for broken bitcode. + ENTER_SUBBLOCK = 1, + + /// DEFINE_ABBREV - Defines an abbrev for the current block. It consists + /// of a vbr5 for # operand infos. Each operand info is emitted with a + /// single bit to indicate if it is a literal encoding. If so, the value is + /// emitted with a vbr8. If not, the encoding is emitted as 3 bits followed + /// by the info value as a vbr5 if needed. + DEFINE_ABBREV = 2, + + // UNABBREV_RECORDs are emitted with a vbr6 for the record code, followed by + // a vbr6 for the # operands, followed by vbr6's for each operand. + UNABBREV_RECORD = 3, + + // This is not a code, this is a marker for the first abbrev assignment. + FIRST_APPLICATION_ABBREV = 4 + }; + + /// StandardBlockIDs - All bitcode files can optionally include a BLOCKINFO + /// block, which contains metadata about other blocks in the file. + enum StandardBlockIDs { + /// BLOCKINFO_BLOCK is used to define metadata about blocks, for example, + /// standard abbrevs that should be available to all blocks of a specified + /// ID. + BLOCKINFO_BLOCK_ID = 0, + + // Block IDs 1-7 are reserved for future expansion. + FIRST_APPLICATION_BLOCKID = 8 + }; + + /// BlockInfoCodes - The blockinfo block contains metadata about user-defined + /// blocks. + enum BlockInfoCodes { + // DEFINE_ABBREV has magic semantics here, applying to the current SETBID'd + // block, instead of the BlockInfo block. + + BLOCKINFO_CODE_SETBID = 1, // SETBID: [blockid#] + BLOCKINFO_CODE_BLOCKNAME = 2, // BLOCKNAME: [name] + BLOCKINFO_CODE_SETRECORDNAME = 3 // BLOCKINFO_CODE_SETRECORDNAME: [id, name] + }; + +} // End bitc namespace + +/// BitCodeAbbrevOp - This describes one or more operands in an abbreviation. +/// This is actually a union of two different things: +/// 1. It could be a literal integer value ("the operand is always 17"). +/// 2. It could be an encoding specification ("this operand encoded like so"). +/// +class BitCodeAbbrevOp { + uint64_t Val; // A literal value or data for an encoding. + bool IsLiteral : 1; // Indicate whether this is a literal value or not. + unsigned Enc : 3; // The encoding to use. +public: + enum Encoding { + Fixed = 1, // A fixed width field, Val specifies number of bits. + VBR = 2, // A VBR field where Val specifies the width of each chunk. + Array = 3, // A sequence of fields, next field species elt encoding. + Char6 = 4, // A 6-bit fixed field which maps to [a-zA-Z0-9._]. + Blob = 5 // 32-bit aligned array of 8-bit characters. + }; + + explicit BitCodeAbbrevOp(uint64_t V) : Val(V), IsLiteral(true) {} + explicit BitCodeAbbrevOp(Encoding E, uint64_t Data = 0) + : Val(Data), IsLiteral(false), Enc(E) {} + + bool isLiteral() const { return IsLiteral; } + bool isEncoding() const { return !IsLiteral; } + + // Accessors for literals. 
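  // Illustrative sketch (editorial example, not part of this header): the two
  // flavors of operand. A literal operand fixes the value outright; an encoded
  // operand only describes how a value will be stored.
  //
  //   BitCodeAbbrevOp LiteralSeven(7);                    // always the value 7
  //   BitCodeAbbrevOp SixBitVBR(BitCodeAbbrevOp::VBR, 6); // VBR, 6-bit chunks
  //   BitCodeAbbrevOp IdentChars(BitCodeAbbrevOp::Char6); // 6-bit [a-zA-Z0-9._]
  //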
+ uint64_t getLiteralValue() const { assert(isLiteral()); return Val; } + + // Accessors for encoding info. + Encoding getEncoding() const { assert(isEncoding()); return (Encoding)Enc; } + uint64_t getEncodingData() const { + assert(isEncoding() && hasEncodingData()); + return Val; + } + + bool hasEncodingData() const { return hasEncodingData(getEncoding()); } + static bool hasEncodingData(Encoding E) { + switch (E) { + default: assert(0 && "Unknown encoding"); + case Fixed: + case VBR: + return true; + case Array: + case Char6: + case Blob: + return false; + } + } + + /// isChar6 - Return true if this character is legal in the Char6 encoding. + static bool isChar6(char C) { + if (C >= 'a' && C <= 'z') return true; + if (C >= 'A' && C <= 'Z') return true; + if (C >= '0' && C <= '9') return true; + if (C == '.' || C == '_') return true; + return false; + } + static unsigned EncodeChar6(char C) { + if (C >= 'a' && C <= 'z') return C-'a'; + if (C >= 'A' && C <= 'Z') return C-'A'+26; + if (C >= '0' && C <= '9') return C-'0'+26+26; + if (C == '.') return 62; + if (C == '_') return 63; + assert(0 && "Not a value Char6 character!"); + return 0; + } + + static char DecodeChar6(unsigned V) { + assert((V & ~63) == 0 && "Not a Char6 encoded character!"); + if (V < 26) return V+'a'; + if (V < 26+26) return V-26+'A'; + if (V < 26+26+10) return V-26-26+'0'; + if (V == 62) return '.'; + if (V == 63) return '_'; + assert(0 && "Not a value Char6 character!"); + return ' '; + } + +}; + +/// BitCodeAbbrev - This class represents an abbreviation record. An +/// abbreviation allows a complex record that has redundancy to be stored in a +/// specialized format instead of the fully-general, fully-vbr, format. +class BitCodeAbbrev { + SmallVector<BitCodeAbbrevOp, 8> OperandList; + unsigned char RefCount; // Number of things using this. + ~BitCodeAbbrev() {} +public: + BitCodeAbbrev() : RefCount(1) {} + + void addRef() { ++RefCount; } + void dropRef() { if (--RefCount == 0) delete this; } + + unsigned getNumOperandInfos() const { + return static_cast<unsigned>(OperandList.size()); + } + const BitCodeAbbrevOp &getOperandInfo(unsigned N) const { + return OperandList[N]; + } + + void Add(const BitCodeAbbrevOp &OpInfo) { + OperandList.push_back(OpInfo); + } +}; +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/BitstreamReader.h b/include/llvm/Bitcode/BitstreamReader.h new file mode 100644 index 0000000..b7ae47d --- /dev/null +++ b/include/llvm/Bitcode/BitstreamReader.h @@ -0,0 +1,638 @@ +//===- BitstreamReader.h - Low-level bitstream reader interface -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header defines the BitstreamReader class. This class can be used to +// read an arbitrary bitstream, regardless of its contents. +// +//===----------------------------------------------------------------------===// + +#ifndef BITSTREAM_READER_H +#define BITSTREAM_READER_H + +#include "llvm/Bitcode/BitCodes.h" +#include <climits> +#include <vector> + +namespace llvm { + + class Deserializer; + +class BitstreamReader { +public: + /// BlockInfo - This contains information emitted to BLOCKINFO_BLOCK blocks. + /// These describe abbreviations that all blocks of the specified ID inherit. 
+ struct BlockInfo { + unsigned BlockID; + std::vector<BitCodeAbbrev*> Abbrevs; + std::string Name; + + std::vector<std::pair<unsigned, std::string> > RecordNames; + }; +private: + /// FirstChar/LastChar - This remembers the first and last bytes of the + /// stream. + const unsigned char *FirstChar, *LastChar; + + std::vector<BlockInfo> BlockInfoRecords; + + /// IgnoreBlockInfoNames - This is set to true if we don't care about the + /// block/record name information in the BlockInfo block. Only llvm-bcanalyzer + /// uses this. + bool IgnoreBlockInfoNames; + + BitstreamReader(const BitstreamReader&); // NOT IMPLEMENTED + void operator=(const BitstreamReader&); // NOT IMPLEMENTED +public: + BitstreamReader() : FirstChar(0), LastChar(0), IgnoreBlockInfoNames(true) { + } + + BitstreamReader(const unsigned char *Start, const unsigned char *End) { + IgnoreBlockInfoNames = true; + init(Start, End); + } + + void init(const unsigned char *Start, const unsigned char *End) { + FirstChar = Start; + LastChar = End; + assert(((End-Start) & 3) == 0 &&"Bitcode stream not a multiple of 4 bytes"); + } + + ~BitstreamReader() { + // Free the BlockInfoRecords. + while (!BlockInfoRecords.empty()) { + BlockInfo &Info = BlockInfoRecords.back(); + // Free blockinfo abbrev info. + for (unsigned i = 0, e = static_cast<unsigned>(Info.Abbrevs.size()); + i != e; ++i) + Info.Abbrevs[i]->dropRef(); + BlockInfoRecords.pop_back(); + } + } + + const unsigned char *getFirstChar() const { return FirstChar; } + const unsigned char *getLastChar() const { return LastChar; } + + /// CollectBlockInfoNames - This is called by clients that want block/record + /// name information. + void CollectBlockInfoNames() { IgnoreBlockInfoNames = false; } + bool isIgnoringBlockInfoNames() { return IgnoreBlockInfoNames; } + + //===--------------------------------------------------------------------===// + // Block Manipulation + //===--------------------------------------------------------------------===// + + /// hasBlockInfoRecords - Return true if we've already read and processed the + /// block info block for this Bitstream. We only process it for the first + /// cursor that walks over it. + bool hasBlockInfoRecords() const { return !BlockInfoRecords.empty(); } + + /// getBlockInfo - If there is block info for the specified ID, return it, + /// otherwise return null. + const BlockInfo *getBlockInfo(unsigned BlockID) const { + // Common case, the most recent entry matches BlockID. + if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID) + return &BlockInfoRecords.back(); + + for (unsigned i = 0, e = static_cast<unsigned>(BlockInfoRecords.size()); + i != e; ++i) + if (BlockInfoRecords[i].BlockID == BlockID) + return &BlockInfoRecords[i]; + return 0; + } + + BlockInfo &getOrCreateBlockInfo(unsigned BlockID) { + if (const BlockInfo *BI = getBlockInfo(BlockID)) + return *const_cast<BlockInfo*>(BI); + + // Otherwise, add a new record. + BlockInfoRecords.push_back(BlockInfo()); + BlockInfoRecords.back().BlockID = BlockID; + return BlockInfoRecords.back(); + } + +}; + +class BitstreamCursor { + friend class Deserializer; + BitstreamReader *BitStream; + const unsigned char *NextChar; + + /// CurWord - This is the current data we have pulled from the stream but have + /// not returned to the client. + uint32_t CurWord; + + /// BitsInCurWord - This is the number of bits in CurWord that are valid. This + /// is always from [0...31] inclusive. 
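  // Illustrative sketch (editorial example, not part of this header): a
  // minimal read loop over a word-aligned bitcode buffer. BufStart and BufEnd
  // are placeholders for the buffer bounds.
  //
  //   BitstreamReader Reader(BufStart, BufEnd);
  //   BitstreamCursor Cursor(Reader);
  //   while (!Cursor.AtEndOfStream()) {
  //     unsigned Code = Cursor.ReadCode();
  //     if (Code == bitc::ENTER_SUBBLOCK) {
  //       unsigned BlockID = Cursor.ReadSubBlockID();
  //       if (Cursor.SkipBlock()) break;   // malformed block
  //     }
  //     // ... handle DEFINE_ABBREV, END_BLOCK and record codes as needed.
  //   }
  //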
+ unsigned BitsInCurWord; + + // CurCodeSize - This is the declared size of code values used for the current + // block, in bits. + unsigned CurCodeSize; + + /// CurAbbrevs - Abbrevs installed at in this block. + std::vector<BitCodeAbbrev*> CurAbbrevs; + + struct Block { + unsigned PrevCodeSize; + std::vector<BitCodeAbbrev*> PrevAbbrevs; + explicit Block(unsigned PCS) : PrevCodeSize(PCS) {} + }; + + /// BlockScope - This tracks the codesize of parent blocks. + SmallVector<Block, 8> BlockScope; + +public: + BitstreamCursor() : BitStream(0), NextChar(0) { + } + BitstreamCursor(const BitstreamCursor &RHS) : BitStream(0), NextChar(0) { + operator=(RHS); + } + + explicit BitstreamCursor(BitstreamReader &R) : BitStream(&R) { + NextChar = R.getFirstChar(); + assert(NextChar && "Bitstream not initialized yet"); + CurWord = 0; + BitsInCurWord = 0; + CurCodeSize = 2; + } + + void init(BitstreamReader &R) { + freeState(); + + BitStream = &R; + NextChar = R.getFirstChar(); + assert(NextChar && "Bitstream not initialized yet"); + CurWord = 0; + BitsInCurWord = 0; + CurCodeSize = 2; + } + + ~BitstreamCursor() { + freeState(); + } + + void operator=(const BitstreamCursor &RHS) { + freeState(); + + BitStream = RHS.BitStream; + NextChar = RHS.NextChar; + CurWord = RHS.CurWord; + BitsInCurWord = RHS.BitsInCurWord; + CurCodeSize = RHS.CurCodeSize; + + // Copy abbreviations, and bump ref counts. + CurAbbrevs = RHS.CurAbbrevs; + for (unsigned i = 0, e = static_cast<unsigned>(CurAbbrevs.size()); + i != e; ++i) + CurAbbrevs[i]->addRef(); + + // Copy block scope and bump ref counts. + for (unsigned S = 0, e = static_cast<unsigned>(BlockScope.size()); + S != e; ++S) { + std::vector<BitCodeAbbrev*> &Abbrevs = BlockScope[S].PrevAbbrevs; + for (unsigned i = 0, e = static_cast<unsigned>(Abbrevs.size()); + i != e; ++i) + Abbrevs[i]->addRef(); + } + } + + void freeState() { + // Free all the Abbrevs. + for (unsigned i = 0, e = static_cast<unsigned>(CurAbbrevs.size()); + i != e; ++i) + CurAbbrevs[i]->dropRef(); + CurAbbrevs.clear(); + + // Free all the Abbrevs in the block scope. + for (unsigned S = 0, e = static_cast<unsigned>(BlockScope.size()); + S != e; ++S) { + std::vector<BitCodeAbbrev*> &Abbrevs = BlockScope[S].PrevAbbrevs; + for (unsigned i = 0, e = static_cast<unsigned>(Abbrevs.size()); + i != e; ++i) + Abbrevs[i]->dropRef(); + } + BlockScope.clear(); + } + + /// GetAbbrevIDWidth - Return the number of bits used to encode an abbrev #. + unsigned GetAbbrevIDWidth() const { return CurCodeSize; } + + bool AtEndOfStream() const { + return NextChar == BitStream->getLastChar() && BitsInCurWord == 0; + } + + /// GetCurrentBitNo - Return the bit # of the bit we are reading. + uint64_t GetCurrentBitNo() const { + return (NextChar-BitStream->getFirstChar())*CHAR_BIT - BitsInCurWord; + } + + BitstreamReader *getBitStreamReader() { + return BitStream; + } + const BitstreamReader *getBitStreamReader() const { + return BitStream; + } + + + /// JumpToBit - Reset the stream to the specified bit number. + void JumpToBit(uint64_t BitNo) { + uintptr_t ByteNo = uintptr_t(BitNo/8) & ~3; + uintptr_t WordBitNo = uintptr_t(BitNo) & 31; + assert(ByteNo <= (uintptr_t)(BitStream->getLastChar()- + BitStream->getFirstChar()) && + "Invalid location"); + + // Move the cursor to the right word. + NextChar = BitStream->getFirstChar()+ByteNo; + BitsInCurWord = 0; + CurWord = 0; + + // Skip over any bits that are already consumed. 
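    // Worked example (editorial note): JumpToBit(70) yields ByteNo = (70/8) & ~3
    // = 8 and WordBitNo = 70 & 31 = 6, so the cursor seeks to the word starting
    // at byte 8 and then discards 6 bits with the Read(6) call below.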
+ if (WordBitNo) + Read(static_cast<unsigned>(WordBitNo)); + } + + + uint32_t Read(unsigned NumBits) { + // If the field is fully contained by CurWord, return it quickly. + if (BitsInCurWord >= NumBits) { + uint32_t R = CurWord & ((1U << NumBits)-1); + CurWord >>= NumBits; + BitsInCurWord -= NumBits; + return R; + } + + // If we run out of data, stop at the end of the stream. + if (NextChar == BitStream->getLastChar()) { + CurWord = 0; + BitsInCurWord = 0; + return 0; + } + + unsigned R = CurWord; + + // Read the next word from the stream. + CurWord = (NextChar[0] << 0) | (NextChar[1] << 8) | + (NextChar[2] << 16) | (NextChar[3] << 24); + NextChar += 4; + + // Extract NumBits-BitsInCurWord from what we just read. + unsigned BitsLeft = NumBits-BitsInCurWord; + + // Be careful here, BitsLeft is in the range [1..32] inclusive. + R |= (CurWord & (~0U >> (32-BitsLeft))) << BitsInCurWord; + + // BitsLeft bits have just been used up from CurWord. + if (BitsLeft != 32) + CurWord >>= BitsLeft; + else + CurWord = 0; + BitsInCurWord = 32-BitsLeft; + return R; + } + + uint64_t Read64(unsigned NumBits) { + if (NumBits <= 32) return Read(NumBits); + + uint64_t V = Read(32); + return V | (uint64_t)Read(NumBits-32) << 32; + } + + uint32_t ReadVBR(unsigned NumBits) { + uint32_t Piece = Read(NumBits); + if ((Piece & (1U << (NumBits-1))) == 0) + return Piece; + + uint32_t Result = 0; + unsigned NextBit = 0; + while (1) { + Result |= (Piece & ((1U << (NumBits-1))-1)) << NextBit; + + if ((Piece & (1U << (NumBits-1))) == 0) + return Result; + + NextBit += NumBits-1; + Piece = Read(NumBits); + } + } + + uint64_t ReadVBR64(unsigned NumBits) { + uint64_t Piece = Read(NumBits); + if ((Piece & (1U << (NumBits-1))) == 0) + return Piece; + + uint64_t Result = 0; + unsigned NextBit = 0; + while (1) { + Result |= (Piece & ((1U << (NumBits-1))-1)) << NextBit; + + if ((Piece & (1U << (NumBits-1))) == 0) + return Result; + + NextBit += NumBits-1; + Piece = Read(NumBits); + } + } + + void SkipToWord() { + BitsInCurWord = 0; + CurWord = 0; + } + + unsigned ReadCode() { + return Read(CurCodeSize); + } + + + // Block header: + // [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen] + + /// ReadSubBlockID - Having read the ENTER_SUBBLOCK code, read the BlockID for + /// the block. + unsigned ReadSubBlockID() { + return ReadVBR(bitc::BlockIDWidth); + } + + /// SkipBlock - Having read the ENTER_SUBBLOCK abbrevid and a BlockID, skip + /// over the body of this block. If the block record is malformed, return + /// true. + bool SkipBlock() { + // Read and ignore the codelen value. Since we are skipping this block, we + // don't care what code widths are used inside of it. + ReadVBR(bitc::CodeLenWidth); + SkipToWord(); + unsigned NumWords = Read(bitc::BlockSizeWidth); + + // Check that the block wasn't partially defined, and that the offset isn't + // bogus. + if (AtEndOfStream() || NextChar+NumWords*4 > BitStream->getLastChar()) + return true; + + NextChar += NumWords*4; + return false; + } + + /// EnterSubBlock - Having read the ENTER_SUBBLOCK abbrevid, enter + /// the block, and return true if the block is valid. + bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = 0) { + // Save the current block's state on BlockScope. + BlockScope.push_back(Block(CurCodeSize)); + BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); + + // Add the abbrevs specific to this block to the CurAbbrevs list. 
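    // (Editorial note: these abbreviations come from a BLOCKINFO block read
    // earlier and are shared by every block with this BlockID, in addition to
    // any DEFINE_ABBREV records that appear inside the block itself.)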
+ if (const BitstreamReader::BlockInfo *Info = + BitStream->getBlockInfo(BlockID)) { + for (unsigned i = 0, e = static_cast<unsigned>(Info->Abbrevs.size()); + i != e; ++i) { + CurAbbrevs.push_back(Info->Abbrevs[i]); + CurAbbrevs.back()->addRef(); + } + } + + // Get the codesize of this block. + CurCodeSize = ReadVBR(bitc::CodeLenWidth); + SkipToWord(); + unsigned NumWords = Read(bitc::BlockSizeWidth); + if (NumWordsP) *NumWordsP = NumWords; + + // Validate that this block is sane. + if (CurCodeSize == 0 || AtEndOfStream() || + NextChar+NumWords*4 > BitStream->getLastChar()) + return true; + + return false; + } + + bool ReadBlockEnd() { + if (BlockScope.empty()) return true; + + // Block tail: + // [END_BLOCK, <align4bytes>] + SkipToWord(); + + PopBlockScope(); + return false; + } + +private: + void PopBlockScope() { + CurCodeSize = BlockScope.back().PrevCodeSize; + + // Delete abbrevs from popped scope. + for (unsigned i = 0, e = static_cast<unsigned>(CurAbbrevs.size()); + i != e; ++i) + CurAbbrevs[i]->dropRef(); + + BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); + BlockScope.pop_back(); + } + + //===--------------------------------------------------------------------===// + // Record Processing + //===--------------------------------------------------------------------===// + +private: + void ReadAbbreviatedLiteral(const BitCodeAbbrevOp &Op, + SmallVectorImpl<uint64_t> &Vals) { + assert(Op.isLiteral() && "Not a literal"); + // If the abbrev specifies the literal value to use, use it. + Vals.push_back(Op.getLiteralValue()); + } + + void ReadAbbreviatedField(const BitCodeAbbrevOp &Op, + SmallVectorImpl<uint64_t> &Vals) { + assert(!Op.isLiteral() && "Use ReadAbbreviatedLiteral for literals!"); + + // Decode the value as we are commanded. + switch (Op.getEncoding()) { + default: assert(0 && "Unknown encoding!"); + case BitCodeAbbrevOp::Fixed: + Vals.push_back(Read((unsigned)Op.getEncodingData())); + break; + case BitCodeAbbrevOp::VBR: + Vals.push_back(ReadVBR64((unsigned)Op.getEncodingData())); + break; + case BitCodeAbbrevOp::Char6: + Vals.push_back(BitCodeAbbrevOp::DecodeChar6(Read(6))); + break; + } + } +public: + + /// getAbbrev - Return the abbreviation for the specified AbbrevId. + const BitCodeAbbrev *getAbbrev(unsigned AbbrevID) { + unsigned AbbrevNo = AbbrevID-bitc::FIRST_APPLICATION_ABBREV; + assert(AbbrevNo < CurAbbrevs.size() && "Invalid abbrev #!"); + return CurAbbrevs[AbbrevNo]; + } + + unsigned ReadRecord(unsigned AbbrevID, SmallVectorImpl<uint64_t> &Vals, + const char **BlobStart = 0, unsigned *BlobLen = 0) { + if (AbbrevID == bitc::UNABBREV_RECORD) { + unsigned Code = ReadVBR(6); + unsigned NumElts = ReadVBR(6); + for (unsigned i = 0; i != NumElts; ++i) + Vals.push_back(ReadVBR64(6)); + return Code; + } + + const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID); + + for (unsigned i = 0, e = Abbv->getNumOperandInfos(); i != e; ++i) { + const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); + if (Op.isLiteral()) { + ReadAbbreviatedLiteral(Op, Vals); + } else if (Op.getEncoding() == BitCodeAbbrevOp::Array) { + // Array case. Read the number of elements as a vbr6. + unsigned NumElts = ReadVBR(6); + + // Get the element encoding. + assert(i+2 == e && "array op not second to last?"); + const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i); + + // Read all the elements. + for (; NumElts; --NumElts) + ReadAbbreviatedField(EltEnc, Vals); + } else if (Op.getEncoding() == BitCodeAbbrevOp::Blob) { + // Blob case. Read the number of bytes as a vbr6. 
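        // On disk a blob is laid out as: [vbr6 byte count, padding up to a
        // 32-bit boundary, the raw bytes, padding up to a 32-bit boundary].
        // The tail padding is skipped below via NewEnd.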
+ unsigned NumElts = ReadVBR(6); + SkipToWord(); // 32-bit alignment + + // Figure out where the end of this blob will be including tail padding. + const unsigned char *NewEnd = NextChar+((NumElts+3)&~3); + + // If this would read off the end of the bitcode file, just set the + // record to empty and return. + if (NewEnd > BitStream->getLastChar()) { + Vals.append(NumElts, 0); + NextChar = BitStream->getLastChar(); + break; + } + + // Otherwise, read the number of bytes. If we can return a reference to + // the data, do so to avoid copying it. + if (BlobStart) { + *BlobStart = (const char*)NextChar; + *BlobLen = NumElts; + } else { + for (; NumElts; ++NextChar, --NumElts) + Vals.push_back(*NextChar); + } + // Skip over tail padding. + NextChar = NewEnd; + } else { + ReadAbbreviatedField(Op, Vals); + } + } + + unsigned Code = (unsigned)Vals[0]; + Vals.erase(Vals.begin()); + return Code; + } + + unsigned ReadRecord(unsigned AbbrevID, SmallVectorImpl<uint64_t> &Vals, + const char *&BlobStart, unsigned &BlobLen) { + return ReadRecord(AbbrevID, Vals, &BlobStart, &BlobLen); + } + + + //===--------------------------------------------------------------------===// + // Abbrev Processing + //===--------------------------------------------------------------------===// + + void ReadAbbrevRecord() { + BitCodeAbbrev *Abbv = new BitCodeAbbrev(); + unsigned NumOpInfo = ReadVBR(5); + for (unsigned i = 0; i != NumOpInfo; ++i) { + bool IsLiteral = Read(1) ? true : false; + if (IsLiteral) { + Abbv->Add(BitCodeAbbrevOp(ReadVBR64(8))); + continue; + } + + BitCodeAbbrevOp::Encoding E = (BitCodeAbbrevOp::Encoding)Read(3); + if (BitCodeAbbrevOp::hasEncodingData(E)) + Abbv->Add(BitCodeAbbrevOp(E, ReadVBR64(5))); + else + Abbv->Add(BitCodeAbbrevOp(E)); + } + CurAbbrevs.push_back(Abbv); + } + +public: + + bool ReadBlockInfoBlock() { + // If this is the second stream to get to the block info block, skip it. + if (BitStream->hasBlockInfoRecords()) + return SkipBlock(); + + if (EnterSubBlock(bitc::BLOCKINFO_BLOCK_ID)) return true; + + SmallVector<uint64_t, 64> Record; + BitstreamReader::BlockInfo *CurBlockInfo = 0; + + // Read all the records for this module. + while (1) { + unsigned Code = ReadCode(); + if (Code == bitc::END_BLOCK) + return ReadBlockEnd(); + if (Code == bitc::ENTER_SUBBLOCK) { + ReadSubBlockID(); + if (SkipBlock()) return true; + continue; + } + + // Read abbrev records, associate them with CurBID. + if (Code == bitc::DEFINE_ABBREV) { + if (!CurBlockInfo) return true; + ReadAbbrevRecord(); + + // ReadAbbrevRecord installs the abbrev in CurAbbrevs. Move it to the + // appropriate BlockInfo. + BitCodeAbbrev *Abbv = CurAbbrevs.back(); + CurAbbrevs.pop_back(); + CurBlockInfo->Abbrevs.push_back(Abbv); + continue; + } + + // Read a record. + Record.clear(); + switch (ReadRecord(Code, Record)) { + default: break; // Default behavior, ignore unknown content. + case bitc::BLOCKINFO_CODE_SETBID: + if (Record.size() < 1) return true; + CurBlockInfo = &BitStream->getOrCreateBlockInfo((unsigned)Record[0]); + break; + case bitc::BLOCKINFO_CODE_BLOCKNAME: { + if (!CurBlockInfo) return true; + if (BitStream->isIgnoringBlockInfoNames()) break; // Ignore name. + std::string Name; + for (unsigned i = 0, e = Record.size(); i != e; ++i) + Name += (char)Record[i]; + CurBlockInfo->Name = Name; + break; + } + case bitc::BLOCKINFO_CODE_SETRECORDNAME: { + if (!CurBlockInfo) return true; + if (BitStream->isIgnoringBlockInfoNames()) break; // Ignore name. 
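        // Record layout here is [record-id, name characters...]: Record[0] is
        // the record code being named and the remaining values spell the name.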
+ std::string Name; + for (unsigned i = 1, e = Record.size(); i != e; ++i) + Name += (char)Record[i]; + CurBlockInfo->RecordNames.push_back(std::make_pair((unsigned)Record[0], + Name)); + break; + } + } + } + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/BitstreamWriter.h b/include/llvm/Bitcode/BitstreamWriter.h new file mode 100644 index 0000000..55dd4dd --- /dev/null +++ b/include/llvm/Bitcode/BitstreamWriter.h @@ -0,0 +1,517 @@ +//===- BitstreamWriter.h - Low-level bitstream writer interface -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header defines the BitstreamWriter class. This class can be used to +// write an arbitrary bitstream, regardless of its contents. +// +//===----------------------------------------------------------------------===// + +#ifndef BITSTREAM_WRITER_H +#define BITSTREAM_WRITER_H + +#include "llvm/Bitcode/BitCodes.h" +#include <vector> + +namespace llvm { + +class BitstreamWriter { + std::vector<unsigned char> &Out; + + /// CurBit - Always between 0 and 31 inclusive, specifies the next bit to use. + unsigned CurBit; + + /// CurValue - The current value. Only bits < CurBit are valid. + uint32_t CurValue; + + /// CurCodeSize - This is the declared size of code values used for the + /// current block, in bits. + unsigned CurCodeSize; + + /// BlockInfoCurBID - When emitting a BLOCKINFO_BLOCK, this is the currently + /// selected BLOCK ID. + unsigned BlockInfoCurBID; + + /// CurAbbrevs - Abbrevs installed at in this block. + std::vector<BitCodeAbbrev*> CurAbbrevs; + + struct Block { + unsigned PrevCodeSize; + unsigned StartSizeWord; + std::vector<BitCodeAbbrev*> PrevAbbrevs; + Block(unsigned PCS, unsigned SSW) : PrevCodeSize(PCS), StartSizeWord(SSW) {} + }; + + /// BlockScope - This tracks the current blocks that we have entered. + std::vector<Block> BlockScope; + + /// BlockInfo - This contains information emitted to BLOCKINFO_BLOCK blocks. + /// These describe abbreviations that all blocks of the specified ID inherit. + struct BlockInfo { + unsigned BlockID; + std::vector<BitCodeAbbrev*> Abbrevs; + }; + std::vector<BlockInfo> BlockInfoRecords; + +public: + explicit BitstreamWriter(std::vector<unsigned char> &O) + : Out(O), CurBit(0), CurValue(0), CurCodeSize(2) {} + + ~BitstreamWriter() { + assert(CurBit == 0 && "Unflused data remaining"); + assert(BlockScope.empty() && CurAbbrevs.empty() && "Block imbalance"); + + // Free the BlockInfoRecords. + while (!BlockInfoRecords.empty()) { + BlockInfo &Info = BlockInfoRecords.back(); + // Free blockinfo abbrev info. + for (unsigned i = 0, e = static_cast<unsigned>(Info.Abbrevs.size()); + i != e; ++i) + Info.Abbrevs[i]->dropRef(); + BlockInfoRecords.pop_back(); + } + } + + std::vector<unsigned char> &getBuffer() { return Out; } + + /// \brief Retrieve the current position in the stream, in bits. + uint64_t GetCurrentBitNo() const { return Out.size() * 8 + CurBit; } + + //===--------------------------------------------------------------------===// + // Basic Primitives for emitting bits to the stream. 
+ //===--------------------------------------------------------------------===// + + void Emit(uint32_t Val, unsigned NumBits) { + assert(NumBits <= 32 && "Invalid value size!"); + assert((Val & ~(~0U >> (32-NumBits))) == 0 && "High bits set!"); + CurValue |= Val << CurBit; + if (CurBit + NumBits < 32) { + CurBit += NumBits; + return; + } + + // Add the current word. + unsigned V = CurValue; + Out.push_back((unsigned char)(V >> 0)); + Out.push_back((unsigned char)(V >> 8)); + Out.push_back((unsigned char)(V >> 16)); + Out.push_back((unsigned char)(V >> 24)); + + if (CurBit) + CurValue = Val >> (32-CurBit); + else + CurValue = 0; + CurBit = (CurBit+NumBits) & 31; + } + + void Emit64(uint64_t Val, unsigned NumBits) { + if (NumBits <= 32) + Emit((uint32_t)Val, NumBits); + else { + Emit((uint32_t)Val, 32); + Emit((uint32_t)(Val >> 32), NumBits-32); + } + } + + void FlushToWord() { + if (CurBit) { + unsigned V = CurValue; + Out.push_back((unsigned char)(V >> 0)); + Out.push_back((unsigned char)(V >> 8)); + Out.push_back((unsigned char)(V >> 16)); + Out.push_back((unsigned char)(V >> 24)); + CurBit = 0; + CurValue = 0; + } + } + + void EmitVBR(uint32_t Val, unsigned NumBits) { + uint32_t Threshold = 1U << (NumBits-1); + + // Emit the bits with VBR encoding, NumBits-1 bits at a time. + while (Val >= Threshold) { + Emit((Val & ((1 << (NumBits-1))-1)) | (1 << (NumBits-1)), NumBits); + Val >>= NumBits-1; + } + + Emit(Val, NumBits); + } + + void EmitVBR64(uint64_t Val, unsigned NumBits) { + if ((uint32_t)Val == Val) + return EmitVBR((uint32_t)Val, NumBits); + + uint64_t Threshold = 1U << (NumBits-1); + + // Emit the bits with VBR encoding, NumBits-1 bits at a time. + while (Val >= Threshold) { + Emit(((uint32_t)Val & ((1 << (NumBits-1))-1)) | + (1 << (NumBits-1)), NumBits); + Val >>= NumBits-1; + } + + Emit((uint32_t)Val, NumBits); + } + + /// EmitCode - Emit the specified code. + void EmitCode(unsigned Val) { + Emit(Val, CurCodeSize); + } + + // BackpatchWord - Backpatch a 32-bit word in the output with the specified + // value. + void BackpatchWord(unsigned ByteNo, unsigned NewWord) { + Out[ByteNo++] = (unsigned char)(NewWord >> 0); + Out[ByteNo++] = (unsigned char)(NewWord >> 8); + Out[ByteNo++] = (unsigned char)(NewWord >> 16); + Out[ByteNo ] = (unsigned char)(NewWord >> 24); + } + + //===--------------------------------------------------------------------===// + // Block Manipulation + //===--------------------------------------------------------------------===// + + /// getBlockInfo - If there is block info for the specified ID, return it, + /// otherwise return null. + BlockInfo *getBlockInfo(unsigned BlockID) { + // Common case, the most recent entry matches BlockID. + if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID) + return &BlockInfoRecords.back(); + + for (unsigned i = 0, e = static_cast<unsigned>(BlockInfoRecords.size()); + i != e; ++i) + if (BlockInfoRecords[i].BlockID == BlockID) + return &BlockInfoRecords[i]; + return 0; + } + + void EnterSubblock(unsigned BlockID, unsigned CodeLen) { + // Block header: + // [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen] + EmitCode(bitc::ENTER_SUBBLOCK); + EmitVBR(BlockID, bitc::BlockIDWidth); + EmitVBR(CodeLen, bitc::CodeLenWidth); + FlushToWord(); + + unsigned BlockSizeWordLoc = static_cast<unsigned>(Out.size()); + unsigned OldCodeSize = CurCodeSize; + + // Emit a placeholder, which will be replaced when the block is popped. 
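    // The zero emitted here reserves the 32-bit blocklen field; ExitBlock()
    // computes the real size in words and patches it in via BackpatchWord().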
+ Emit(0, bitc::BlockSizeWidth); + + CurCodeSize = CodeLen; + + // Push the outer block's abbrev set onto the stack, start out with an + // empty abbrev set. + BlockScope.push_back(Block(OldCodeSize, BlockSizeWordLoc/4)); + BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); + + // If there is a blockinfo for this BlockID, add all the predefined abbrevs + // to the abbrev list. + if (BlockInfo *Info = getBlockInfo(BlockID)) { + for (unsigned i = 0, e = static_cast<unsigned>(Info->Abbrevs.size()); + i != e; ++i) { + CurAbbrevs.push_back(Info->Abbrevs[i]); + Info->Abbrevs[i]->addRef(); + } + } + } + + void ExitBlock() { + assert(!BlockScope.empty() && "Block scope imbalance!"); + + // Delete all abbrevs. + for (unsigned i = 0, e = static_cast<unsigned>(CurAbbrevs.size()); + i != e; ++i) + CurAbbrevs[i]->dropRef(); + + const Block &B = BlockScope.back(); + + // Block tail: + // [END_BLOCK, <align4bytes>] + EmitCode(bitc::END_BLOCK); + FlushToWord(); + + // Compute the size of the block, in words, not counting the size field. + unsigned SizeInWords= static_cast<unsigned>(Out.size())/4-B.StartSizeWord-1; + unsigned ByteNo = B.StartSizeWord*4; + + // Update the block size field in the header of this sub-block. + BackpatchWord(ByteNo, SizeInWords); + + // Restore the inner block's code size and abbrev table. + CurCodeSize = B.PrevCodeSize; + BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); + BlockScope.pop_back(); + } + + //===--------------------------------------------------------------------===// + // Record Emission + //===--------------------------------------------------------------------===// + +private: + /// EmitAbbreviatedLiteral - Emit a literal value according to its abbrev + /// record. This is a no-op, since the abbrev specifies the literal to use. + template<typename uintty> + void EmitAbbreviatedLiteral(const BitCodeAbbrevOp &Op, uintty V) { + assert(Op.isLiteral() && "Not a literal"); + // If the abbrev specifies the literal value to use, don't emit + // anything. + assert(V == Op.getLiteralValue() && + "Invalid abbrev for record!"); + } + + /// EmitAbbreviatedField - Emit a single scalar field value with the specified + /// encoding. + template<typename uintty> + void EmitAbbreviatedField(const BitCodeAbbrevOp &Op, uintty V) { + assert(!Op.isLiteral() && "Literals should use EmitAbbreviatedLiteral!"); + + // Encode the value as we are commanded. + switch (Op.getEncoding()) { + default: assert(0 && "Unknown encoding!"); + case BitCodeAbbrevOp::Fixed: + Emit((unsigned)V, (unsigned)Op.getEncodingData()); + break; + case BitCodeAbbrevOp::VBR: + EmitVBR64(V, (unsigned)Op.getEncodingData()); + break; + case BitCodeAbbrevOp::Char6: + Emit(BitCodeAbbrevOp::EncodeChar6((char)V), 6); + break; + } + } + + /// EmitRecordWithAbbrevImpl - This is the core implementation of the record + /// emission code. If BlobData is non-null, then it specifies an array of + /// data that should be emitted as part of the Blob or Array operand that is + /// known to exist at the end of the the record. 
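  // Illustrative sketch (editorial example, not part of this header): defining
  // an abbreviation and emitting one record with it. The block ID, record code
  // and value are made up for the example.
  //
  //   std::vector<unsigned char> Buffer;
  //   BitstreamWriter Writer(Buffer);
  //   Writer.EnterSubblock(/*BlockID=*/8, /*CodeLen=*/3);
  //
  //   BitCodeAbbrev *Abbv = new BitCodeAbbrev();
  //   Abbv->Add(BitCodeAbbrevOp(1));                        // literal record code 1
  //   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));  // one VBR-6 operand
  //   unsigned AbbrevID = Writer.EmitAbbrev(Abbv);          // takes ownership
  //
  //   SmallVector<uint64_t, 2> Vals;
  //   Vals.push_back(42);
  //   Writer.EmitRecord(/*Code=*/1, Vals, AbbrevID);
  //   Writer.ExitBlock();
  //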
+ template<typename uintty> + void EmitRecordWithAbbrevImpl(unsigned Abbrev, SmallVectorImpl<uintty> &Vals, + const char *BlobData, unsigned BlobLen) { + unsigned AbbrevNo = Abbrev-bitc::FIRST_APPLICATION_ABBREV; + assert(AbbrevNo < CurAbbrevs.size() && "Invalid abbrev #!"); + BitCodeAbbrev *Abbv = CurAbbrevs[AbbrevNo]; + + EmitCode(Abbrev); + + unsigned RecordIdx = 0; + for (unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos()); + i != e; ++i) { + const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); + if (Op.isLiteral()) { + assert(RecordIdx < Vals.size() && "Invalid abbrev/record"); + EmitAbbreviatedLiteral(Op, Vals[RecordIdx]); + ++RecordIdx; + } else if (Op.getEncoding() == BitCodeAbbrevOp::Array) { + // Array case. + assert(i+2 == e && "array op not second to last?"); + const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i); + + // If this record has blob data, emit it, otherwise we must have record + // entries to encode this way. + if (BlobData) { + assert(RecordIdx == Vals.size() && + "Blob data and record entries specified for array!"); + // Emit a vbr6 to indicate the number of elements present. + EmitVBR(static_cast<uint32_t>(BlobLen), 6); + + // Emit each field. + for (unsigned i = 0; i != BlobLen; ++i) + EmitAbbreviatedField(EltEnc, (unsigned char)BlobData[i]); + + // Know that blob data is consumed for assertion below. + BlobData = 0; + } else { + // Emit a vbr6 to indicate the number of elements present. + EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6); + + // Emit each field. + for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx) + EmitAbbreviatedField(EltEnc, Vals[RecordIdx]); + } + } else if (Op.getEncoding() == BitCodeAbbrevOp::Blob) { + // If this record has blob data, emit it, otherwise we must have record + // entries to encode this way. + + // Emit a vbr6 to indicate the number of elements present. + if (BlobData) { + EmitVBR(static_cast<uint32_t>(BlobLen), 6); + assert(RecordIdx == Vals.size() && + "Blob data and record entries specified for blob operand!"); + } else { + EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6); + } + + // Flush to a 32-bit alignment boundary. + FlushToWord(); + assert((Out.size() & 3) == 0 && "Not 32-bit aligned"); + + // Emit each field as a literal byte. + if (BlobData) { + for (unsigned i = 0; i != BlobLen; ++i) + Out.push_back((unsigned char)BlobData[i]); + + // Know that blob data is consumed for assertion below. + BlobData = 0; + } else { + for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx) { + assert(Vals[RecordIdx] < 256 && "Value too large to emit as blob"); + Out.push_back((unsigned char)Vals[RecordIdx]); + } + } + // Align end to 32-bits. + while (Out.size() & 3) + Out.push_back(0); + + } else { // Single scalar field. + assert(RecordIdx < Vals.size() && "Invalid abbrev/record"); + EmitAbbreviatedField(Op, Vals[RecordIdx]); + ++RecordIdx; + } + } + assert(RecordIdx == Vals.size() && "Not all record operands emitted!"); + assert(BlobData == 0 && + "Blob data specified for record that doesn't use it!"); + } + +public: + + /// EmitRecord - Emit the specified record to the stream, using an abbrev if + /// we have one to compress the output. + template<typename uintty> + void EmitRecord(unsigned Code, SmallVectorImpl<uintty> &Vals, + unsigned Abbrev = 0) { + if (!Abbrev) { + // If we don't have an abbrev to use, emit this in its fully unabbreviated + // form. 
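      // Wire format in this case: [UNABBREV_RECORD, vbr6 code, vbr6 numops,
      // vbr6 op0, vbr6 op1, ...] -- the mirror of the UNABBREV_RECORD path in
      // BitstreamCursor::ReadRecord.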
+ EmitCode(bitc::UNABBREV_RECORD); + EmitVBR(Code, 6); + EmitVBR(static_cast<uint32_t>(Vals.size()), 6); + for (unsigned i = 0, e = static_cast<unsigned>(Vals.size()); i != e; ++i) + EmitVBR64(Vals[i], 6); + return; + } + + // Insert the code into Vals to treat it uniformly. + Vals.insert(Vals.begin(), Code); + + EmitRecordWithAbbrev(Abbrev, Vals); + } + + /// EmitRecordWithAbbrev - Emit a record with the specified abbreviation. + /// Unlike EmitRecord, the code for the record should be included in Vals as + /// the first entry. + template<typename uintty> + void EmitRecordWithAbbrev(unsigned Abbrev, SmallVectorImpl<uintty> &Vals) { + EmitRecordWithAbbrevImpl(Abbrev, Vals, 0, 0); + } + + /// EmitRecordWithBlob - Emit the specified record to the stream, using an + /// abbrev that includes a blob at the end. The blob data to emit is + /// specified by the pointer and length specified at the end. In contrast to + /// EmitRecord, this routine expects that the first entry in Vals is the code + /// of the record. + template<typename uintty> + void EmitRecordWithBlob(unsigned Abbrev, SmallVectorImpl<uintty> &Vals, + const char *BlobData, unsigned BlobLen) { + EmitRecordWithAbbrevImpl(Abbrev, Vals, BlobData, BlobLen); + } + + /// EmitRecordWithArray - Just like EmitRecordWithBlob, works with records + /// that end with an array. + template<typename uintty> + void EmitRecordWithArray(unsigned Abbrev, SmallVectorImpl<uintty> &Vals, + const char *ArrayData, unsigned ArrayLen) { + EmitRecordWithAbbrevImpl(Abbrev, Vals, ArrayData, ArrayLen); + } + + //===--------------------------------------------------------------------===// + // Abbrev Emission + //===--------------------------------------------------------------------===// + +private: + // Emit the abbreviation as a DEFINE_ABBREV record. + void EncodeAbbrev(BitCodeAbbrev *Abbv) { + EmitCode(bitc::DEFINE_ABBREV); + EmitVBR(Abbv->getNumOperandInfos(), 5); + for (unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos()); + i != e; ++i) { + const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); + Emit(Op.isLiteral(), 1); + if (Op.isLiteral()) { + EmitVBR64(Op.getLiteralValue(), 8); + } else { + Emit(Op.getEncoding(), 3); + if (Op.hasEncodingData()) + EmitVBR64(Op.getEncodingData(), 5); + } + } + } +public: + + /// EmitAbbrev - This emits an abbreviation to the stream. Note that this + /// method takes ownership of the specified abbrev. + unsigned EmitAbbrev(BitCodeAbbrev *Abbv) { + // Emit the abbreviation as a record. + EncodeAbbrev(Abbv); + CurAbbrevs.push_back(Abbv); + return static_cast<unsigned>(CurAbbrevs.size())-1 + + bitc::FIRST_APPLICATION_ABBREV; + } + + //===--------------------------------------------------------------------===// + // BlockInfo Block Emission + //===--------------------------------------------------------------------===// + + /// EnterBlockInfoBlock - Start emitting the BLOCKINFO_BLOCK. + void EnterBlockInfoBlock(unsigned CodeWidth) { + EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, CodeWidth); + BlockInfoCurBID = -1U; + } +private: + /// SwitchToBlockID - If we aren't already talking about the specified block + /// ID, emit a BLOCKINFO_CODE_SETBID record. + void SwitchToBlockID(unsigned BlockID) { + if (BlockInfoCurBID == BlockID) return; + SmallVector<unsigned, 2> V; + V.push_back(BlockID); + EmitRecord(bitc::BLOCKINFO_CODE_SETBID, V); + BlockInfoCurBID = BlockID; + } + + BlockInfo &getOrCreateBlockInfo(unsigned BlockID) { + if (BlockInfo *BI = getBlockInfo(BlockID)) + return *BI; + + // Otherwise, add a new record. 
+ BlockInfoRecords.push_back(BlockInfo()); + BlockInfoRecords.back().BlockID = BlockID; + return BlockInfoRecords.back(); + } + +public: + + /// EmitBlockInfoAbbrev - Emit a DEFINE_ABBREV record for the specified + /// BlockID. + unsigned EmitBlockInfoAbbrev(unsigned BlockID, BitCodeAbbrev *Abbv) { + SwitchToBlockID(BlockID); + EncodeAbbrev(Abbv); + + // Add the abbrev to the specified block record. + BlockInfo &Info = getOrCreateBlockInfo(BlockID); + Info.Abbrevs.push_back(Abbv); + + return Info.Abbrevs.size()-1+bitc::FIRST_APPLICATION_ABBREV; + } +}; + + +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/Deserialize.h b/include/llvm/Bitcode/Deserialize.h new file mode 100644 index 0000000..3e90227 --- /dev/null +++ b/include/llvm/Bitcode/Deserialize.h @@ -0,0 +1,517 @@ +//=- Deserialize.h - Generic Object Deserialization from Bitcode --*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface for generic object deserialization from +// LLVM bitcode. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_SERIALIZE_INPUT +#define LLVM_BITCODE_SERIALIZE_INPUT + +#include "llvm/Bitcode/BitstreamReader.h" +#include "llvm/Bitcode/Serialization.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/DataTypes.h" +#include <vector> + +namespace llvm { + +class Deserializer { + + //===----------------------------------------------------------===// + // Internal type definitions. + //===----------------------------------------------------------===// + + struct BPNode { + BPNode* Next; + uintptr_t& PtrRef; + + BPNode(BPNode* n, uintptr_t& pref) + : Next(n), PtrRef(pref) { + PtrRef = 0; + } + }; + + struct BPEntry { + union { BPNode* Head; void* Ptr; }; + + BPEntry() : Head(NULL) {} + + static inline bool isPod() { return true; } + + void SetPtr(BPNode*& FreeList, void* P); + }; + + class BPKey { + unsigned Raw; + + public: + BPKey(SerializedPtrID PtrId) : Raw(PtrId << 1) { assert (PtrId > 0); } + BPKey(unsigned code, unsigned) : Raw(code) {} + + void MarkFinal() { Raw |= 0x1; } + bool hasFinalPtr() const { return Raw & 0x1 ? true : false; } + SerializedPtrID getID() const { return Raw >> 1; } + + static inline BPKey getEmptyKey() { return BPKey(0,0); } + static inline BPKey getTombstoneKey() { return BPKey(1,0); } + static inline unsigned getHashValue(const BPKey& K) { return K.Raw & ~0x1; } + + static bool isEqual(const BPKey& K1, const BPKey& K2) { + return (K1.Raw ^ K2.Raw) & ~0x1 ? false : true; + } + + static bool isPod() { return true; } + }; + + typedef llvm::DenseMap<BPKey,BPEntry,BPKey,BPEntry> MapTy; + + //===----------------------------------------------------------===// + // Publicly visible types. 
+ //===----------------------------------------------------------===// + +public: + struct Location { + uint64_t BitNo; + unsigned BlockID; + unsigned NumWords; + + Location(uint64_t bit, unsigned bid, unsigned words) + : BitNo(bit), BlockID(bid), NumWords(words) {} + + Location() : BitNo(0), BlockID(0), NumWords(0) {} + + Location& operator=(Location& RHS) { + BitNo = RHS.BitNo; + BlockID = RHS.BlockID; + NumWords = RHS.NumWords; + return *this; + } + + bool operator==(const Location& RHS) const { return BitNo == RHS.BitNo; } + bool operator!=(const Location& RHS) const { return BitNo != RHS.BitNo; } + + bool contains(const Location& RHS) const { + if (RHS.BitNo < BitNo) + return false; + + if ((RHS.BitNo - BitNo) >> 5 < NumWords) + return true; + + return false; + } + }; + + //===----------------------------------------------------------===// + // Internal data members. + //===----------------------------------------------------------===// + +private: + BitstreamCursor Stream; + SmallVector<uint64_t,20> Record; + unsigned RecIdx; + BumpPtrAllocator Allocator; + BPNode* FreeList; + MapTy BPatchMap; + llvm::SmallVector<Location,8> BlockStack; + unsigned AbbrevNo; + unsigned RecordCode; + uint64_t StreamStart; + + //===----------------------------------------------------------===// + // Public Interface. + //===----------------------------------------------------------===// + +public: + Deserializer(BitstreamReader& stream); + ~Deserializer(); + + uint64_t ReadInt(); + int64_t ReadSInt(); + SerializedPtrID ReadPtrID() { return (SerializedPtrID) ReadInt(); } + + + bool ReadBool() { + return ReadInt() ? true : false; + } + + template <typename T> + inline T& Read(T& X) { + SerializeTrait<T>::Read(*this,X); + return X; + } + + template <typename T> + inline T* Create() { + return SerializeTrait<T>::Create(*this); + } + + char* ReadCStr(char* cstr = NULL, unsigned MaxLen=0, bool isNullTerm=true); + void ReadCStr(std::vector<char>& buff, bool isNullTerm=false, unsigned Idx=0); + + template <typename T> + inline T* ReadOwnedPtr(bool AutoRegister = true) { + SerializedPtrID PtrID = ReadPtrID(); + + if (!PtrID) + return NULL; + + T* x = SerializeTrait<T>::Create(*this); + + if (AutoRegister) + RegisterPtr(PtrID,x); + + return x; + } + + template <typename T, typename Arg1> + inline T* ReadOwnedPtr(Arg1& arg1, bool AutoRegister = true) { + SerializedPtrID PtrID = ReadPtrID(); + + if (!PtrID) + return NULL; + + T* x = SerializeTrait<T>::Create(*this, arg1); + + if (AutoRegister) + RegisterPtr(PtrID,x); + + return x; + } + + template <typename T> + inline void ReadOwnedPtr(T*& Ptr, bool AutoRegister = true) { + Ptr = ReadOwnedPtr<T>(AutoRegister); + } + + template <typename T1, typename T2> + void BatchReadOwnedPtrs(T1*& P1, T2*& P2, + bool A1=true, bool A2=true) { + + SerializedPtrID ID1 = ReadPtrID(); + SerializedPtrID ID2 = ReadPtrID(); + + P1 = (ID1) ? SerializeTrait<T1>::Create(*this) : NULL; + if (ID1 && A1) RegisterPtr(ID1,P1); + + P2 = (ID2) ? SerializeTrait<T2>::Create(*this) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + } + + template <typename T1, typename T2, typename Arg1> + void BatchReadOwnedPtrs(T1*& P1, T2*& P2, Arg1& arg1, + bool A1=true, bool A2=true) { + + SerializedPtrID ID1 = ReadPtrID(); + SerializedPtrID ID2 = ReadPtrID(); + + P1 = (ID1) ? SerializeTrait<T1>::Create(*this, arg1) : NULL; + if (ID1 && A1) RegisterPtr(ID1,P1); + + P2 = (ID2) ? 
SerializeTrait<T2>::Create(*this, arg1) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + } + + template <typename T1, typename T2, typename T3> + void BatchReadOwnedPtrs(T1*& P1, T2*& P2, T3*& P3, + bool A1=true, bool A2=true, bool A3=true) { + + SerializedPtrID ID1 = ReadPtrID(); + SerializedPtrID ID2 = ReadPtrID(); + SerializedPtrID ID3 = ReadPtrID(); + + P1 = (ID1) ? SerializeTrait<T1>::Create(*this) : NULL; + if (ID1 && A1) RegisterPtr(ID1,P1); + + P2 = (ID2) ? SerializeTrait<T2>::Create(*this) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + + P3 = (ID3) ? SerializeTrait<T3>::Create(*this) : NULL; + if (ID3 && A3) RegisterPtr(ID3,P3); + } + + template <typename T1, typename T2, typename T3, typename Arg1> + void BatchReadOwnedPtrs(T1*& P1, T2*& P2, T3*& P3, Arg1& arg1, + bool A1=true, bool A2=true, bool A3=true) { + + SerializedPtrID ID1 = ReadPtrID(); + SerializedPtrID ID2 = ReadPtrID(); + SerializedPtrID ID3 = ReadPtrID(); + + P1 = (ID1) ? SerializeTrait<T1>::Create(*this, arg1) : NULL; + if (ID1 && A1) RegisterPtr(ID1,P1); + + P2 = (ID2) ? SerializeTrait<T2>::Create(*this, arg1) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + + P3 = (ID3) ? SerializeTrait<T3>::Create(*this, arg1) : NULL; + if (ID3 && A3) RegisterPtr(ID3,P3); + } + + template <typename T> + void BatchReadOwnedPtrs(unsigned NumPtrs, T** Ptrs, bool AutoRegister=true) { + llvm::SmallVector<SerializedPtrID,10> BatchIDVec; + + for (unsigned i = 0; i < NumPtrs; ++i) + BatchIDVec.push_back(ReadPtrID()); + + for (unsigned i = 0; i < NumPtrs; ++i) { + SerializedPtrID& PtrID = BatchIDVec[i]; + + T* p = PtrID ? SerializeTrait<T>::Create(*this) : NULL; + + if (PtrID && AutoRegister) + RegisterPtr(PtrID,p); + + Ptrs[i] = p; + } + } + + template <typename T, typename Arg1> + void BatchReadOwnedPtrs(unsigned NumPtrs, T** Ptrs, Arg1& arg1, + bool AutoRegister=true) { + + llvm::SmallVector<SerializedPtrID,10> BatchIDVec; + + for (unsigned i = 0; i < NumPtrs; ++i) + BatchIDVec.push_back(ReadPtrID()); + + for (unsigned i = 0; i < NumPtrs; ++i) { + SerializedPtrID& PtrID = BatchIDVec[i]; + + T* p = PtrID ? SerializeTrait<T>::Create(*this, arg1) : NULL; + + if (PtrID && AutoRegister) + RegisterPtr(PtrID,p); + + Ptrs[i] = p; + } + } + + template <typename T1, typename T2> + void BatchReadOwnedPtrs(unsigned NumT1Ptrs, T1** Ptrs, T2*& P2, + bool A1=true, bool A2=true) { + + llvm::SmallVector<SerializedPtrID,10> BatchIDVec; + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + BatchIDVec.push_back(ReadPtrID()); + + SerializedPtrID ID2 = ReadPtrID(); + + for (unsigned i = 0; i < NumT1Ptrs; ++i) { + SerializedPtrID& PtrID = BatchIDVec[i]; + + T1* p = PtrID ? SerializeTrait<T1>::Create(*this) : NULL; + + if (PtrID && A1) + RegisterPtr(PtrID,p); + + Ptrs[i] = p; + } + + P2 = (ID2) ? SerializeTrait<T2>::Create(*this) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + } + + template <typename T1, typename T2, typename Arg1> + void BatchReadOwnedPtrs(unsigned NumT1Ptrs, T1** Ptrs, T2*& P2, Arg1& arg1, + bool A1=true, bool A2=true) { + + llvm::SmallVector<SerializedPtrID,10> BatchIDVec; + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + BatchIDVec.push_back(ReadPtrID()); + + SerializedPtrID ID2 = ReadPtrID(); + + for (unsigned i = 0; i < NumT1Ptrs; ++i) { + SerializedPtrID& PtrID = BatchIDVec[i]; + + T1* p = PtrID ? SerializeTrait<T1>::Create(*this, arg1) : NULL; + + if (PtrID && A1) + RegisterPtr(PtrID,p); + + Ptrs[i] = p; + } + + P2 = (ID2) ? 
SerializeTrait<T2>::Create(*this, arg1) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + } + + template <typename T1, typename T2, typename T3> + void BatchReadOwnedPtrs(unsigned NumT1Ptrs, T1** Ptrs, + T2*& P2, T3*& P3, + bool A1=true, bool A2=true, bool A3=true) { + + llvm::SmallVector<SerializedPtrID,10> BatchIDVec; + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + BatchIDVec.push_back(ReadPtrID()); + + SerializedPtrID ID2 = ReadPtrID(); + SerializedPtrID ID3 = ReadPtrID(); + + for (unsigned i = 0; i < NumT1Ptrs; ++i) { + SerializedPtrID& PtrID = BatchIDVec[i]; + + T1* p = PtrID ? SerializeTrait<T1>::Create(*this) : NULL; + + if (PtrID && A1) + RegisterPtr(PtrID,p); + + Ptrs[i] = p; + } + + P2 = (ID2) ? SerializeTrait<T2>::Create(*this) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + + P3 = (ID3) ? SerializeTrait<T3>::Create(*this) : NULL; + if (ID3 && A3) RegisterPtr(ID3,P3); + } + + template <typename T1, typename T2, typename T3, typename Arg1> + void BatchReadOwnedPtrs(unsigned NumT1Ptrs, T1** Ptrs, + T2*& P2, T3*& P3, Arg1& arg1, + bool A1=true, bool A2=true, bool A3=true) { + + llvm::SmallVector<SerializedPtrID,10> BatchIDVec; + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + BatchIDVec.push_back(ReadPtrID()); + + SerializedPtrID ID2 = ReadPtrID(); + SerializedPtrID ID3 = ReadPtrID(); + + for (unsigned i = 0; i < NumT1Ptrs; ++i) { + SerializedPtrID& PtrID = BatchIDVec[i]; + + T1* p = PtrID ? SerializeTrait<T1>::Create(*this, arg1) : NULL; + + if (PtrID && A1) + RegisterPtr(PtrID,p); + + Ptrs[i] = p; + } + + P2 = (ID2) ? SerializeTrait<T2>::Create(*this, arg1) : NULL; + if (ID2 && A2) RegisterPtr(ID2,P2); + + P3 = (ID3) ? SerializeTrait<T3>::Create(*this, arg1) : NULL; + if (ID3 && A3) RegisterPtr(ID3,P3); + } + + template <typename T> + void ReadPtr(T*& PtrRef, bool AllowBackpatch = true) { + ReadUIntPtr(reinterpret_cast<uintptr_t&>(PtrRef), AllowBackpatch); + } + + template <typename T> + void ReadPtr(const T*& PtrRef, bool AllowBackpatch = true) { + ReadPtr(const_cast<T*&>(PtrRef), AllowBackpatch); + } + + + template <typename T> + void ReadPtr(T*& PtrRef, const SerializedPtrID& PtrID, + bool AllowBackpatch = true) { + ReadUIntPtr(reinterpret_cast<uintptr_t&>(PtrRef), PtrID, AllowBackpatch); + } + + template <typename T> + void ReadPtr(const T*& PtrRef, const SerializedPtrID& PtrID, + bool AllowBackpatch = true) { + + ReadPtr(const_cast<T*&>(PtrRef), PtrID, AllowBackpatch); + } + + template <typename T> + T* ReadPtr() { T* x = 0; ReadPtr<T>(x,false); return x; } + + void ReadUIntPtr(uintptr_t& PtrRef, const SerializedPtrID& PtrID, + bool AllowBackpatch = true); + + void ReadUIntPtr(uintptr_t& PtrRef, bool AllowBackpatch = true) { + ReadUIntPtr(PtrRef,ReadPtrID(),AllowBackpatch); + } + + template <typename T> + T& ReadRef() { + T* p = reinterpret_cast<T*>(ReadInternalRefPtr()); + return *p; + } + + void RegisterPtr(const SerializedPtrID& PtrID, const void* Ptr); + + void RegisterPtr(const void* Ptr) { + RegisterPtr(ReadPtrID(),Ptr); + } + + template<typename T> + void RegisterRef(const T& x) { + RegisterPtr(&x); + } + + template<typename T> + void RegisterRef(const SerializedPtrID& PtrID, const T& x) { + RegisterPtr(PtrID,&x); + } + + Location getCurrentBlockLocation(); + unsigned getCurrentBlockID(); + unsigned getAbbrevNo(); + + bool FinishedBlock(Location BlockLoc); + bool JumpTo(const Location& BlockLoc); + void Rewind(); + + bool AtEnd(); + bool inRecord(); + void SkipBlock(); + bool SkipToBlock(unsigned BlockID); + + unsigned getRecordCode(); + + BitstreamCursor &getStream() { 
return Stream; } + +private: + bool AdvanceStream(); + void ReadRecord(); + + uintptr_t ReadInternalRefPtr(); + + static inline bool HasFinalPtr(MapTy::value_type& V) { + return V.first.hasFinalPtr(); + } + + static inline uintptr_t GetFinalPtr(MapTy::value_type& V) { + return reinterpret_cast<uintptr_t>(V.second.Ptr); + } + + static inline BPNode* GetBPNode(MapTy::value_type& V) { + return V.second.Head; + } + + static inline void SetBPNode(MapTy::value_type& V, BPNode* N) { + V.second.Head = N; + } + + void SetPtr(MapTy::value_type& V, const void* P) { + V.first.MarkFinal(); + V.second.SetPtr(FreeList,const_cast<void*>(P)); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h new file mode 100644 index 0000000..1ede69d --- /dev/null +++ b/include/llvm/Bitcode/LLVMBitCodes.h @@ -0,0 +1,221 @@ +//===- LLVMBitCodes.h - Enum values for the LLVM bitcode format -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header defines Bitcode enum values for LLVM IR bitcode files. +// +// The enum values defined in this file should be considered permanent. If +// new features are added, they should have values added at the end of the +// respective lists. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_LLVMBITCODES_H +#define LLVM_BITCODE_LLVMBITCODES_H + +#include "llvm/Bitcode/BitCodes.h" + +namespace llvm { +namespace bitc { + // The only top-level block type defined is for a module. + enum BlockIDs { + // Blocks + MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID, + + // Module sub-block id's. + PARAMATTR_BLOCK_ID, + TYPE_BLOCK_ID, + CONSTANTS_BLOCK_ID, + FUNCTION_BLOCK_ID, + TYPE_SYMTAB_BLOCK_ID, + VALUE_SYMTAB_BLOCK_ID + }; + + + /// MODULE blocks have a number of optional fields and subblocks. + enum ModuleCodes { + MODULE_CODE_VERSION = 1, // VERSION: [version#] + MODULE_CODE_TRIPLE = 2, // TRIPLE: [strchr x N] + MODULE_CODE_DATALAYOUT = 3, // DATALAYOUT: [strchr x N] + MODULE_CODE_ASM = 4, // ASM: [strchr x N] + MODULE_CODE_SECTIONNAME = 5, // SECTIONNAME: [strchr x N] + MODULE_CODE_DEPLIB = 6, // DEPLIB: [strchr x N] + + // GLOBALVAR: [pointer type, isconst, initid, + // linkage, alignment, section, visibility, threadlocal] + MODULE_CODE_GLOBALVAR = 7, + + // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment, + // section, visibility] + MODULE_CODE_FUNCTION = 8, + + // ALIAS: [alias type, aliasee val#, linkage] + MODULE_CODE_ALIAS = 9, + + /// MODULE_CODE_PURGEVALS: [numvals] + MODULE_CODE_PURGEVALS = 10, + + MODULE_CODE_GCNAME = 11 // GCNAME: [strchr x N] + }; + + /// PARAMATTR blocks have code for defining a parameter attribute set. + enum AttributeCodes { + PARAMATTR_CODE_ENTRY = 1 // ENTRY: [paramidx0, attr0, paramidx1, attr1...] + }; + + /// TYPE blocks have codes for each type primitive they use. 
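+  ///
+  /// For example, following the per-code operand notes below, the type i32 is
+  /// emitted as the record [TYPE_CODE_INTEGER, 32], and a pointer to it as
+  /// [TYPE_CODE_POINTER, <type number of i32>].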
+ enum TypeCodes { + TYPE_CODE_NUMENTRY = 1, // NUMENTRY: [numentries] + + // Type Codes + TYPE_CODE_VOID = 2, // VOID + TYPE_CODE_FLOAT = 3, // FLOAT + TYPE_CODE_DOUBLE = 4, // DOUBLE + TYPE_CODE_LABEL = 5, // LABEL + TYPE_CODE_OPAQUE = 6, // OPAQUE + TYPE_CODE_INTEGER = 7, // INTEGER: [width] + TYPE_CODE_POINTER = 8, // POINTER: [pointee type] + TYPE_CODE_FUNCTION = 9, // FUNCTION: [vararg, retty, paramty x N] + TYPE_CODE_STRUCT = 10, // STRUCT: [ispacked, eltty x N] + TYPE_CODE_ARRAY = 11, // ARRAY: [numelts, eltty] + TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty] + + // These are not with the other floating point types because they're + // a late addition, and putting them in the right place breaks + // binary compatibility. + TYPE_CODE_X86_FP80 = 13, // X86 LONG DOUBLE + TYPE_CODE_FP128 = 14, // LONG DOUBLE (112 bit mantissa) + TYPE_CODE_PPC_FP128= 15, // PPC LONG DOUBLE (2 doubles) + + TYPE_CODE_METADATA = 16 // METADATA + }; + + // The type symbol table only has one code (TST_ENTRY_CODE). + enum TypeSymtabCodes { + TST_CODE_ENTRY = 1 // TST_ENTRY: [typeid, namechar x N] + }; + + // The value symbol table only has one code (VST_ENTRY_CODE). + enum ValueSymtabCodes { + VST_CODE_ENTRY = 1, // VST_ENTRY: [valid, namechar x N] + VST_CODE_BBENTRY = 2 // VST_BBENTRY: [bbid, namechar x N] + }; + + // The constants block (CONSTANTS_BLOCK_ID) describes emission for each + // constant and maintains an implicit current type value. + enum ConstantsCodes { + CST_CODE_SETTYPE = 1, // SETTYPE: [typeid] + CST_CODE_NULL = 2, // NULL + CST_CODE_UNDEF = 3, // UNDEF + CST_CODE_INTEGER = 4, // INTEGER: [intval] + CST_CODE_WIDE_INTEGER = 5, // WIDE_INTEGER: [n x intval] + CST_CODE_FLOAT = 6, // FLOAT: [fpval] + CST_CODE_AGGREGATE = 7, // AGGREGATE: [n x value number] + CST_CODE_STRING = 8, // STRING: [values] + CST_CODE_CSTRING = 9, // CSTRING: [values] + CST_CODE_CE_BINOP = 10, // CE_BINOP: [opcode, opval, opval] + CST_CODE_CE_CAST = 11, // CE_CAST: [opcode, opty, opval] + CST_CODE_CE_GEP = 12, // CE_GEP: [n x operands] + CST_CODE_CE_SELECT = 13, // CE_SELECT: [opval, opval, opval] + CST_CODE_CE_EXTRACTELT = 14, // CE_EXTRACTELT: [opty, opval, opval] + CST_CODE_CE_INSERTELT = 15, // CE_INSERTELT: [opval, opval, opval] + CST_CODE_CE_SHUFFLEVEC = 16, // CE_SHUFFLEVEC: [opval, opval, opval] + CST_CODE_CE_CMP = 17, // CE_CMP: [opty, opval, opval, pred] + CST_CODE_INLINEASM = 18, // INLINEASM: [sideeffect,asmstr,conststr] + CST_CODE_CE_SHUFVEC_EX = 19, // SHUFVEC_EX: [opty, opval, opval, opval] + CST_CODE_MDSTRING = 20, // MDSTRING: [values] + CST_CODE_MDNODE = 21 // MDNODE: [n x (type num, value num)] + }; + + /// CastOpcodes - These are values used in the bitcode files to encode which + /// cast a CST_CODE_CE_CAST or a XXX refers to. The values of these enums + /// have no fixed relation to the LLVM IR enum values. Changing these will + /// break compatibility with old files. + enum CastOpcodes { + CAST_TRUNC = 0, + CAST_ZEXT = 1, + CAST_SEXT = 2, + CAST_FPTOUI = 3, + CAST_FPTOSI = 4, + CAST_UITOFP = 5, + CAST_SITOFP = 6, + CAST_FPTRUNC = 7, + CAST_FPEXT = 8, + CAST_PTRTOINT = 9, + CAST_INTTOPTR = 10, + CAST_BITCAST = 11 + }; + + /// BinaryOpcodes - These are values used in the bitcode files to encode which + /// binop a CST_CODE_CE_BINOP or a XXX refers to. The values of these enums + /// have no fixed relation to the LLVM IR enum values. Changing these will + /// break compatibility with old files. 
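+  /// For example, a constant-expression add is emitted as a CST_CODE_CE_BINOP
+  /// record of the form [opcode=BINOP_ADD, opval, opval], using the operand
+  /// layout noted in ConstantsCodes above.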
+ enum BinaryOpcodes { + BINOP_ADD = 0, + BINOP_SUB = 1, + BINOP_MUL = 2, + BINOP_UDIV = 3, + BINOP_SDIV = 4, // overloaded for FP + BINOP_UREM = 5, + BINOP_SREM = 6, // overloaded for FP + BINOP_SHL = 7, + BINOP_LSHR = 8, + BINOP_ASHR = 9, + BINOP_AND = 10, + BINOP_OR = 11, + BINOP_XOR = 12 + }; + + + // The function body block (FUNCTION_BLOCK_ID) describes function bodies. It + // can contain a constant block (CONSTANTS_BLOCK_ID). + enum FunctionCodes { + FUNC_CODE_DECLAREBLOCKS = 1, // DECLAREBLOCKS: [n] + + FUNC_CODE_INST_BINOP = 2, // BINOP: [opcode, ty, opval, opval] + FUNC_CODE_INST_CAST = 3, // CAST: [opcode, ty, opty, opval] + FUNC_CODE_INST_GEP = 4, // GEP: [n x operands] + FUNC_CODE_INST_SELECT = 5, // SELECT: [ty, opval, opval, opval] + FUNC_CODE_INST_EXTRACTELT = 6, // EXTRACTELT: [opty, opval, opval] + FUNC_CODE_INST_INSERTELT = 7, // INSERTELT: [ty, opval, opval, opval] + FUNC_CODE_INST_SHUFFLEVEC = 8, // SHUFFLEVEC: [ty, opval, opval, opval] + FUNC_CODE_INST_CMP = 9, // CMP: [opty, opval, opval, pred] + + FUNC_CODE_INST_RET = 10, // RET: [opty,opval<both optional>] + FUNC_CODE_INST_BR = 11, // BR: [bb#, bb#, cond] or [bb#] + FUNC_CODE_INST_SWITCH = 12, // SWITCH: [opty, opval, n, n x ops] + FUNC_CODE_INST_INVOKE = 13, // INVOKE: [attr, fnty, op0,op1, ...] + FUNC_CODE_INST_UNWIND = 14, // UNWIND + FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE + + FUNC_CODE_INST_PHI = 16, // PHI: [ty, val0,bb0, ...] + FUNC_CODE_INST_MALLOC = 17, // MALLOC: [instty, op, align] + FUNC_CODE_INST_FREE = 18, // FREE: [opty, op] + FUNC_CODE_INST_ALLOCA = 19, // ALLOCA: [instty, op, align] + FUNC_CODE_INST_LOAD = 20, // LOAD: [opty, op, align, vol] + // FIXME: Remove STORE in favor of STORE2 in LLVM 3.0 + FUNC_CODE_INST_STORE = 21, // STORE: [valty,val,ptr, align, vol] + FUNC_CODE_INST_CALL = 22, // CALL: [attr, fnty, fnid, args...] + FUNC_CODE_INST_VAARG = 23, // VAARG: [valistty, valist, instty] + // This store code encodes the pointer type, rather than the value type + // this is so information only available in the pointer type (e.g. address + // spaces) is retained. + FUNC_CODE_INST_STORE2 = 24, // STORE: [ptrty,ptr,val, align, vol] + // FIXME: Remove GETRESULT in favor of EXTRACTVAL in LLVM 3.0 + FUNC_CODE_INST_GETRESULT = 25, // GETRESULT: [ty, opval, n] + FUNC_CODE_INST_EXTRACTVAL = 26, // EXTRACTVAL: [n x operands] + FUNC_CODE_INST_INSERTVAL = 27, // INSERTVAL: [n x operands] + // fcmp/icmp returning Int1TY or vector of Int1Ty, NOT for vicmp/vfcmp + FUNC_CODE_INST_CMP2 = 28, // CMP2: [opty, opval, opval, pred] + // new select on i1 or [N x i1] + FUNC_CODE_INST_VSELECT = 29 // VSELECT: [ty,opval,opval,predty,pred] + }; +} // End bitc namespace +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/ReaderWriter.h b/include/llvm/Bitcode/ReaderWriter.h new file mode 100644 index 0000000..abdd5d3 --- /dev/null +++ b/include/llvm/Bitcode/ReaderWriter.h @@ -0,0 +1,113 @@ +//===-- llvm/Bitcode/ReaderWriter.h - Bitcode reader/writers ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header defines interfaces to read and write LLVM bitcode files/streams. 
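+//
+// For illustration, a minimal round trip through these interfaces might look
+// like the following sketch, where Buf is a MemoryBuffer holding bitcode and
+// OS is a raw_ostream, both obtained elsewhere:
+//
+//   std::string Err;
+//   if (Module *M = ParseBitcodeFile(Buf, &Err)) {
+//     WriteBitcodeToFile(M, OS);
+//     delete M;
+//   } else {
+//     // Err now holds a human-readable description of the failure.
+//   }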
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_H +#define LLVM_BITCODE_H + +#include <iosfwd> +#include <string> + +namespace llvm { + class Module; + class ModuleProvider; + class MemoryBuffer; + class ModulePass; + class BitstreamWriter; + class raw_ostream; + + /// getBitcodeModuleProvider - Read the header of the specified bitcode buffer + /// and prepare for lazy deserialization of function bodies. If successful, + /// this takes ownership of 'buffer' and returns a non-null pointer. On + /// error, this returns null, *does not* take ownership of Buffer, and fills + /// in *ErrMsg with an error description if ErrMsg is non-null. + ModuleProvider *getBitcodeModuleProvider(MemoryBuffer *Buffer, + std::string *ErrMsg = 0); + + /// ParseBitcodeFile - Read the specified bitcode file, returning the module. + /// If an error occurs, this returns null and fills in *ErrMsg if it is + /// non-null. This method *never* takes ownership of Buffer. + Module *ParseBitcodeFile(MemoryBuffer *Buffer, std::string *ErrMsg = 0); + + /// WriteBitcodeToFile - Write the specified module to the specified output + /// stream. + void WriteBitcodeToFile(const Module *M, std::ostream &Out); + + /// WriteBitcodeToFile - Write the specified module to the specified + /// raw output stream. + void WriteBitcodeToFile(const Module *M, raw_ostream &Out); + + /// WriteBitcodeToStream - Write the specified module to the specified + /// raw output stream. + void WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream); + + /// CreateBitcodeWriterPass - Create and return a pass that writes the module + /// to the specified ostream. + ModulePass *CreateBitcodeWriterPass(std::ostream &Str); + + /// createBitcodeWriterPass - Create and return a pass that writes the module + /// to the specified ostream. + ModulePass *createBitcodeWriterPass(raw_ostream &Str); + + + /// isBitcodeWrapper - Return true fi this is a wrapper for LLVM IR bitcode + /// files. + static bool inline isBitcodeWrapper(unsigned char *BufPtr, + unsigned char *BufEnd) { + return (BufPtr != BufEnd && BufPtr[0] == 0xDE && BufPtr[1] == 0xC0 && + BufPtr[2] == 0x17 && BufPtr[3] == 0x0B); + } + + /// SkipBitcodeWrapperHeader - Some systems wrap bc files with a special + /// header for padding or other reasons. The format of this header is: + /// + /// struct bc_header { + /// uint32_t Magic; // 0x0B17C0DE + /// uint32_t Version; // Version, currently always 0. + /// uint32_t BitcodeOffset; // Offset to traditional bitcode file. + /// uint32_t BitcodeSize; // Size of traditional bitcode file. + /// ... potentially other gunk ... + /// }; + /// + /// This function is called when we find a file with a matching magic number. + /// In this case, skip down to the subsection of the file that is actually a + /// BC file. + static inline bool SkipBitcodeWrapperHeader(unsigned char *&BufPtr, + unsigned char *&BufEnd) { + enum { + KnownHeaderSize = 4*4, // Size of header we read. + OffsetField = 2*4, // Offset in bytes to Offset field. + SizeField = 3*4 // Offset in bytes to Size field. + }; + + // Must contain the header! + if (BufEnd-BufPtr < KnownHeaderSize) return true; + + unsigned Offset = ( BufPtr[OffsetField ] | + (BufPtr[OffsetField+1] << 8) | + (BufPtr[OffsetField+2] << 16) | + (BufPtr[OffsetField+3] << 24)); + unsigned Size = ( BufPtr[SizeField ] | + (BufPtr[SizeField +1] << 8) | + (BufPtr[SizeField +2] << 16) | + (BufPtr[SizeField +3] << 24)); + + // Verify that Offset+Size fits in the file. 
+ if (Offset+Size > unsigned(BufEnd-BufPtr)) + return true; + BufPtr += Offset; + BufEnd = BufPtr+Size; + return false; + } +} // End llvm namespace + +#endif diff --git a/include/llvm/Bitcode/Serialization.h b/include/llvm/Bitcode/Serialization.h new file mode 100644 index 0000000..2f0d350 --- /dev/null +++ b/include/llvm/Bitcode/Serialization.h @@ -0,0 +1,68 @@ +//==- Serialization.h - Generic Object Serialization to Bitcode ---*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines traits for primitive types used for both object +// serialization and deserialization. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_SERIALIZE +#define LLVM_BITCODE_SERIALIZE + +#include "llvm/Bitcode/SerializationFwd.h" + +namespace llvm { + +/// SerializeTrait - SerializeTrait bridges between the Serializer/Deserializer +/// and the functions that serialize objects of specific types. The default +/// behavior is to call static methods of the class for the object being +/// serialized, but this behavior can be changed by specializing this +/// template. Classes only need to implement the methods corresponding +/// to the serialization scheme they want to support. For example, "Read" +/// and "ReadVal" correspond to different deserialization schemes which make +/// sense for different types; a class need only implement one of them. +/// Serialization and deserialization of pointers are specially handled +/// by the Serializer and Deserializer using the EmitOwnedPtr, etc. methods. +/// To serialize the actual object referred to by a pointer, the class +/// of the object either must implement the methods called by the default +/// behavior of SerializeTrait, or specialize SerializeTrait. This latter +/// is useful when one cannot add methods to an existing class (for example). +template <typename T> +struct SerializeTrait { + static inline void Emit(Serializer& S, const T& X) { X.Emit(S); } + static inline void Read(Deserializer& D, T& X) { X.Read(D); } + static inline T* Create(Deserializer& D) { return T::Create(D); } + + template <typename Arg1> + static inline T* Create(Deserializer& D, Arg1& arg1) { + return T::Create(D, arg1); + } +}; + +#define SERIALIZE_INT_TRAIT(TYPE)\ +template <> struct SerializeTrait<TYPE> {\ + static void Emit(Serializer& S, TYPE X);\ + static void Read(Deserializer& S, TYPE& X); }; + +SERIALIZE_INT_TRAIT(bool) +SERIALIZE_INT_TRAIT(unsigned char) +SERIALIZE_INT_TRAIT(unsigned short) +SERIALIZE_INT_TRAIT(unsigned int) +SERIALIZE_INT_TRAIT(unsigned long) + +SERIALIZE_INT_TRAIT(signed char) +SERIALIZE_INT_TRAIT(signed short) +SERIALIZE_INT_TRAIT(signed int) +SERIALIZE_INT_TRAIT(signed long) + +#undef SERIALIZE_INT_TRAIT + +} // end namespace llvm + +#endif diff --git a/include/llvm/Bitcode/SerializationFwd.h b/include/llvm/Bitcode/SerializationFwd.h new file mode 100644 index 0000000..7224190 --- /dev/null +++ b/include/llvm/Bitcode/SerializationFwd.h @@ -0,0 +1,27 @@ +//==- SerializationFwd.h - Forward references for Serialization ---*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file provides forward references for bitcode object serialization. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_SERIALIZE_FWD +#define LLVM_BITCODE_SERIALIZE_FWD + +namespace llvm { + +class Serializer; +class Deserializer; +template <typename T> struct SerializeTrait; + +typedef unsigned SerializedPtrID; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Bitcode/Serialize.h b/include/llvm/Bitcode/Serialize.h new file mode 100644 index 0000000..6fe4f02 --- /dev/null +++ b/include/llvm/Bitcode/Serialize.h @@ -0,0 +1,211 @@ +//==- Serialize.h - Generic Object Serialization to Bitcode -------*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface for generic object serialization to +// LLVM bitcode. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_SERIALIZE_OUTPUT +#define LLVM_BITCODE_SERIALIZE_OUTPUT + +#include "llvm/Bitcode/Serialization.h" +#include "llvm/Bitcode/BitstreamWriter.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + +class Serializer { + BitstreamWriter& Stream; + SmallVector<uint64_t,10> Record; + unsigned BlockLevel; + + typedef DenseMap<const void*,unsigned> MapTy; + MapTy PtrMap; + +public: + explicit Serializer(BitstreamWriter& stream); + ~Serializer(); + + //==------------------------------------------------==// + // Template-based dispatch to emit arbitrary types. + //==------------------------------------------------==// + + template <typename T> + inline void Emit(const T& X) { SerializeTrait<T>::Emit(*this,X); } + + //==------------------------------------------------==// + // Methods to emit primitive types. + //==------------------------------------------------==// + + void EmitInt(uint64_t X); + void EmitSInt(int64_t X); + + inline void EmitBool(bool X) { EmitInt(X); } + void EmitCStr(const char* beg, const char* end); + void EmitCStr(const char* cstr); + + void EmitPtr(const void* ptr) { EmitInt(getPtrId(ptr)); } + + template <typename T> + inline void EmitRef(const T& ref) { EmitPtr(&ref); } + + // Emit a pointer and the object pointed to. (This has no relation to the + // OwningPtr<> class.) + template <typename T> + inline void EmitOwnedPtr(T* ptr) { + EmitPtr(ptr); + if (ptr) SerializeTrait<T>::Emit(*this,*ptr); + } + + + //==------------------------------------------------==// + // Batch emission of pointers. 
+ //==------------------------------------------------==// + + template <typename T1, typename T2> + void BatchEmitOwnedPtrs(T1* p1, T2* p2) { + EmitPtr(p1); + EmitPtr(p2); + if (p1) SerializeTrait<T1>::Emit(*this,*p1); + if (p2) SerializeTrait<T2>::Emit(*this,*p2); + } + + template <typename T1, typename T2, typename T3> + void BatchEmitOwnedPtrs(T1* p1, T2* p2, T3* p3) { + EmitPtr(p1); + EmitPtr(p2); + EmitPtr(p3); + if (p1) SerializeTrait<T1>::Emit(*this,*p1); + if (p2) SerializeTrait<T2>::Emit(*this,*p2); + if (p3) SerializeTrait<T3>::Emit(*this,*p3); + } + + template <typename T1, typename T2, typename T3, typename T4> + void BatchEmitOwnedPtrs(T1* p1, T2* p2, T3* p3, T4& p4) { + EmitPtr(p1); + EmitPtr(p2); + EmitPtr(p3); + EmitPtr(p4); + if (p1) SerializeTrait<T1>::Emit(*this,*p1); + if (p2) SerializeTrait<T2>::Emit(*this,*p2); + if (p3) SerializeTrait<T3>::Emit(*this,*p3); + if (p4) SerializeTrait<T4>::Emit(*this,*p4); + } + + template <typename T> + void BatchEmitOwnedPtrs(unsigned NumPtrs, T* const * Ptrs) { + for (unsigned i = 0; i < NumPtrs; ++i) + EmitPtr(Ptrs[i]); + + for (unsigned i = 0; i < NumPtrs; ++i) + if (Ptrs[i]) SerializeTrait<T>::Emit(*this,*Ptrs[i]); + } + + template <typename T1, typename T2> + void BatchEmitOwnedPtrs(unsigned NumT1Ptrs, T1* const * Ptrs, T2* p2) { + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + EmitPtr(Ptrs[i]); + + EmitPtr(p2); + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + if (Ptrs[i]) SerializeTrait<T1>::Emit(*this,*Ptrs[i]); + + if (p2) SerializeTrait<T2>::Emit(*this,*p2); + } + + template <typename T1, typename T2, typename T3> + void BatchEmitOwnedPtrs(unsigned NumT1Ptrs, T1* const * Ptrs, + T2* p2, T3* p3) { + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + EmitPtr(Ptrs[i]); + + EmitPtr(p2); + EmitPtr(p3); + + for (unsigned i = 0; i < NumT1Ptrs; ++i) + if (Ptrs[i]) SerializeTrait<T1>::Emit(*this,*Ptrs[i]); + + if (p2) SerializeTrait<T2>::Emit(*this,*p2); + if (p3) SerializeTrait<T3>::Emit(*this,*p3); + } + + //==------------------------------------------------==// + // Emitter Functors + //==------------------------------------------------==// + + template <typename T> + struct Emitter0 { + Serializer& S; + Emitter0(Serializer& s) : S(s) {} + void operator()(const T& x) const { + SerializeTrait<T>::Emit(S,x); + } + }; + + template <typename T, typename Arg1> + struct Emitter1 { + Serializer& S; + Arg1 A1; + + Emitter1(Serializer& s, Arg1 a1) : S(s), A1(a1) {} + void operator()(const T& x) const { + SerializeTrait<T>::Emit(S,x,A1); + } + }; + + template <typename T, typename Arg1, typename Arg2> + struct Emitter2 { + Serializer& S; + Arg1 A1; + Arg2 A2; + + Emitter2(Serializer& s, Arg1 a1, Arg2 a2) : S(s), A1(a1), A2(a2) {} + void operator()(const T& x) const { + SerializeTrait<T>::Emit(S,x,A1,A2); + } + }; + + template <typename T> + Emitter0<T> MakeEmitter() { + return Emitter0<T>(*this); + } + + template <typename T, typename Arg1> + Emitter1<T,Arg1> MakeEmitter(Arg1 a1) { + return Emitter1<T,Arg1>(*this,a1); + } + + template <typename T, typename Arg1, typename Arg2> + Emitter2<T,Arg1,Arg2> MakeEmitter(Arg1 a1, Arg2 a2) { + return Emitter2<T,Arg1,Arg2>(*this,a1,a2); + } + + //==------------------------------------------------==// + // Misc. query and block/record manipulation methods. 
+ //==------------------------------------------------==// + + bool isRegistered(const void* p) const; + + void FlushRecord() { if (inRecord()) EmitRecord(); } + void EnterBlock(unsigned BlockID = 8, unsigned CodeLen = 3); + void ExitBlock(); + +private: + void EmitRecord(); + inline bool inRecord() { return Record.size() > 0; } + SerializedPtrID getPtrId(const void* ptr); +}; + +} // end namespace llvm +#endif diff --git a/include/llvm/CMakeLists.txt b/include/llvm/CMakeLists.txt new file mode 100644 index 0000000..5e4f408 --- /dev/null +++ b/include/llvm/CMakeLists.txt @@ -0,0 +1,19 @@ +set(LLVM_TARGET_DEFINITIONS Intrinsics.td) + +tablegen(Intrinsics.gen -gen-intrinsic) + +add_custom_target(intrinsics_gen ALL + DEPENDS ${llvm_builded_incs_dir}/Intrinsics.gen) + +set(LLVM_COMMON_DEPENDS ${LLVM_COMMON_DEPENDS} intrinsics_gen PARENT_SCOPE) + +if( MSVC_IDE OR XCODE ) + # Creates a dummy target containing all headers for the benefit of + # Visual Studio users. + file(GLOB_RECURSE headers *.h) + add_td_sources(headers) + add_library(llvm_headers_do_not_build EXCLUDE_FROM_ALL + # We need at least one source file: + ${LLVM_MAIN_SRC_DIR}/lib/Transforms/Hello/Hello.cpp + ${headers}) +endif() diff --git a/include/llvm/CallGraphSCCPass.h b/include/llvm/CallGraphSCCPass.h new file mode 100644 index 0000000..d5ff17c --- /dev/null +++ b/include/llvm/CallGraphSCCPass.h @@ -0,0 +1,73 @@ +//===- CallGraphSCCPass.h - Pass that operates BU on call graph -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the CallGraphSCCPass class, which is used for passes which +// are implemented as bottom-up traversals on the call graph. Because there may +// be cycles in the call graph, passes of this type operate on the call-graph in +// SCC order: that is, they process function bottom-up, except for recursive +// functions, which they process all at once. +// +// These passes are inherently interprocedural, and are required to keep the +// call graph up-to-date if they do anything which could modify it. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CALL_GRAPH_SCC_PASS_H +#define LLVM_CALL_GRAPH_SCC_PASS_H + +#include "llvm/Pass.h" + +namespace llvm { + +class CallGraphNode; +class CallGraph; +class PMStack; + +struct CallGraphSCCPass : public Pass { + + explicit CallGraphSCCPass(intptr_t pid) : Pass(pid) {} + explicit CallGraphSCCPass(void *pid) : Pass(pid) {} + + /// doInitialization - This method is called before the SCC's of the program + /// has been processed, allowing the pass to do initialization as necessary. + virtual bool doInitialization(CallGraph &CG) { + return false; + } + + /// runOnSCC - This method should be implemented by the subclass to perform + /// whatever action is necessary for the specified SCC. Note that + /// non-recursive (or only self-recursive) functions will have an SCC size of + /// 1, where recursive portions of the call graph will have SCC size > 1. + /// + virtual bool runOnSCC(const std::vector<CallGraphNode *> &SCC) = 0; + + /// doFinalization - This method is called after the SCC's of the program has + /// been processed, allowing the pass to do final cleanup as necessary. 
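+  ///
+  /// For illustration, a minimal pass built on this interface might look like
+  /// the sketch below (the name NoopSCCPass and its body are placeholders):
+  ///
+  ///   struct NoopSCCPass : public CallGraphSCCPass {
+  ///     static char ID;
+  ///     NoopSCCPass() : CallGraphSCCPass(&ID) {}
+  ///
+  ///     virtual bool runOnSCC(const std::vector<CallGraphNode *> &SCC) {
+  ///       for (unsigned i = 0, e = SCC.size(); i != e; ++i)
+  ///         if (Function *F = SCC[i]->getFunction())
+  ///           (void)F;        // inspect or transform F here
+  ///       return false;       // report that nothing was changed
+  ///     }
+  ///   };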
+ virtual bool doFinalization(CallGraph &CG) { + return false; + } + + /// Assign pass manager to manager this pass + virtual void assignPassManager(PMStack &PMS, + PassManagerType PMT = PMT_CallGraphPassManager); + + /// Return what kind of Pass Manager can manage this pass. + virtual PassManagerType getPotentialPassManagerType() const { + return PMT_CallGraphPassManager; + } + + /// getAnalysisUsage - For this class, we declare that we require and preserve + /// the call graph. If the derived class implements this method, it should + /// always explicitly call the implementation here. + virtual void getAnalysisUsage(AnalysisUsage &Info) const; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CallingConv.h b/include/llvm/CallingConv.h new file mode 100644 index 0000000..072f7c3 --- /dev/null +++ b/include/llvm/CallingConv.h @@ -0,0 +1,66 @@ +//===-- llvm/CallingConv.h - LLVM Calling Conventions -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines LLVM's set of calling conventions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CALLINGCONV_H +#define LLVM_CALLINGCONV_H + +namespace llvm { + +/// CallingConv Namespace - This namespace contains an enum with a value for +/// the well-known calling conventions. +/// +namespace CallingConv { + /// A set of enums which specify the assigned numeric values for known llvm + /// calling conventions. + /// @brief LLVM Calling Convention Representation + enum ID { + /// C - The default llvm calling convention, compatible with C. This + /// convention is the only calling convention that supports varargs calls. + /// As with typical C calling conventions, the callee/caller have to + /// tolerate certain amounts of prototype mismatch. + C = 0, + + // Generic LLVM calling conventions. None of these calling conventions + // support varargs calls, and all assume that the caller and callee + // prototype exactly match. + + /// Fast - This calling convention attempts to make calls as fast as + /// possible (e.g. by passing things in registers). + Fast = 8, + + // Cold - This calling convention attempts to make code in the caller as + // efficient as possible under the assumption that the call is not commonly + // executed. As such, these calls often preserve all registers so that the + // call does not break any live ranges in the caller side. + Cold = 9, + + // Target - This is the start of the target-specific calling conventions, + // e.g. fastcall and thiscall on X86. + FirstTargetCC = 64, + + /// X86_StdCall - stdcall is the calling conventions mostly used by the + /// Win32 API. It is basically the same as the C convention with the + /// difference in that the callee is responsible for popping the arguments + /// from the stack. + X86_StdCall = 64, + + /// X86_FastCall - 'fast' analog of X86_StdCall. Passes first two arguments + /// in ECX:EDX registers, others - via stack. Callee is responsible for + /// stack cleaning. 
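+    ///
+    /// In LLVM assembly this convention is typically written as the
+    /// x86_fastcallcc keyword on calls and function definitions.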
+ X86_FastCall = 65 + }; +} // End CallingConv namespace + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h new file mode 100644 index 0000000..a004632 --- /dev/null +++ b/include/llvm/CodeGen/AsmPrinter.h @@ -0,0 +1,406 @@ +//===-- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a class to be used as the base class for target specific +// asm writers. This class primarily handles common functionality used by +// all asm writers. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_ASMPRINTER_H +#define LLVM_CODEGEN_ASMPRINTER_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Target/TargetMachine.h" +#include <set> + +namespace llvm { + class GCStrategy; + class Constant; + class ConstantArray; + class ConstantInt; + class ConstantStruct; + class ConstantVector; + class GCMetadataPrinter; + class GlobalVariable; + class MachineConstantPoolEntry; + class MachineConstantPoolValue; + class DwarfWriter; + class Mangler; + class Section; + class TargetAsmInfo; + class Type; + class raw_ostream; + + /// AsmPrinter - This class is intended to be used as a driving class for all + /// asm writers. + class AsmPrinter : public MachineFunctionPass { + static char ID; + + /// FunctionNumber - This provides a unique ID for each function emitted in + /// this translation unit. It is autoincremented by SetupMachineFunction, + /// and can be accessed with getFunctionNumber() and + /// IncrementFunctionNumber(). + /// + unsigned FunctionNumber; + + // GCMetadataPrinters - The garbage collection metadata printer table. + typedef DenseMap<GCStrategy*,GCMetadataPrinter*> gcp_map_type; + typedef gcp_map_type::iterator gcp_iterator; + gcp_map_type GCMetadataPrinters; + + protected: + /// DW -This is needed because printDeclare() has to insert + /// DbgVariable entries into the dwarf table. This is a short term hack + /// that ought be fixed soon. + DwarfWriter *DW; + + // Necessary for external weak linkage support + std::set<const GlobalValue*> ExtWeakSymbols; + + /// OptLevel - Generating code at a specific optimization level. + CodeGenOpt::Level OptLevel; + public: + /// Output stream on which we're printing assembly code. + /// + raw_ostream &O; + + /// Target machine description. + /// + TargetMachine &TM; + + /// Target Asm Printer information. + /// + const TargetAsmInfo *TAI; + + /// Target Register Information. + /// + const TargetRegisterInfo *TRI; + + /// The current machine function. + const MachineFunction *MF; + + /// Name-mangler for global names. + /// + Mangler *Mang; + + /// Cache of mangled name for current function. This is recalculated at the + /// beginning of each call to runOnMachineFunction(). + /// + std::string CurrentFnName; + + /// CurrentSection - The current section we are emitting to. This is + /// controlled and used by the SwitchSection method. + std::string CurrentSection; + const Section* CurrentSection_; + + /// IsInTextSection - True if the current section we are emitting to is a + /// text section. 
+ bool IsInTextSection; + + /// VerboseAsm - Emit comments in assembly output if this is true. + /// + bool VerboseAsm; + + protected: + explicit AsmPrinter(raw_ostream &o, TargetMachine &TM, + const TargetAsmInfo *T, CodeGenOpt::Level OL, bool V); + + public: + virtual ~AsmPrinter(); + + /// isVerbose - Return true if assembly output should contain comments. + /// + bool isVerbose() const { return VerboseAsm; } + + /// SwitchToTextSection - Switch to the specified section of the executable + /// if we are not already in it! If GV is non-null and if the global has an + /// explicitly requested section, we switch to the section indicated for the + /// global instead of NewSection. + /// + /// If the new section is an empty string, this method forgets what the + /// current section is, but does not emit a .section directive. + /// + /// This method is used when about to emit executable code. + /// + void SwitchToTextSection(const char *NewSection, const GlobalValue *GV = NULL); + + /// SwitchToDataSection - Switch to the specified section of the executable + /// if we are not already in it! If GV is non-null and if the global has an + /// explicitly requested section, we switch to the section indicated for the + /// global instead of NewSection. + /// + /// If the new section is an empty string, this method forgets what the + /// current section is, but does not emit a .section directive. + /// + /// This method is used when about to emit data. For most assemblers, this + /// is the same as the SwitchToTextSection method, but not all assemblers + /// are the same. + /// + void SwitchToDataSection(const char *NewSection, const GlobalValue *GV = NULL); + + /// SwitchToSection - Switch to the specified section of the executable if + /// we are not already in it! + void SwitchToSection(const Section* NS); + + /// getGlobalLinkName - Returns the asm/link name of of the specified + /// global variable. Should be overridden by each target asm printer to + /// generate the appropriate value. + virtual const std::string &getGlobalLinkName(const GlobalVariable *GV, + std::string &LinkName) const; + + /// EmitExternalGlobal - Emit the external reference to a global variable. + /// Should be overridden if an indirect reference should be used. + virtual void EmitExternalGlobal(const GlobalVariable *GV); + + /// getCurrentFunctionEHName - Called to return (and cache) the + /// CurrentFnEHName. + /// + const std::string &getCurrentFunctionEHName(const MachineFunction *MF, + std::string &FuncEHName) const; + + protected: + /// getAnalysisUsage - Record analysis usage. + /// + void getAnalysisUsage(AnalysisUsage &AU) const; + + /// doInitialization - Set up the AsmPrinter when we are working on a new + /// module. If your pass overrides this, it must make sure to explicitly + /// call this implementation. + bool doInitialization(Module &M); + + /// doFinalization - Shut down the asmprinter. If you override this in your + /// pass, you must make sure to call it explicitly. + bool doFinalization(Module &M); + + /// PrintSpecial - Print information related to the specified machine instr + /// that is independent of the operand, and may be independent of the instr + /// itself. This can be useful for portably encoding the comment character + /// or other bits of target-specific knowledge into the asmstrings. The + /// syntax used is ${:comment}. Targets can override this to add support + /// for their own strange codes. 
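+    ///
+    /// For example, an asm string fragment such as "${:comment} spill slot"
+    /// is printed with ${:comment} replaced by the target's comment string,
+    /// so a target whose comment string is "#" would emit "# spill slot".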
+ virtual void PrintSpecial(const MachineInstr *MI, const char *Code) const; + + /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM + /// instruction, using the specified assembler variant. Targets should + /// override this to format as appropriate. This method can return true if + /// the operand is erroneous. + virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, const char *ExtraCode); + + /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM + /// instruction, using the specified assembler variant as an address. + /// Targets should override this to format as appropriate. This method can + /// return true if the operand is erroneous. + virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, + const char *ExtraCode); + + /// SetupMachineFunction - This should be called when a new MachineFunction + /// is being processed from runOnMachineFunction. + void SetupMachineFunction(MachineFunction &MF); + + /// getFunctionNumber - Return a unique ID for the current function. + /// + unsigned getFunctionNumber() const { return FunctionNumber; } + + /// IncrementFunctionNumber - Increase Function Number. AsmPrinters should + /// not normally call this, as the counter is automatically bumped by + /// SetupMachineFunction. + void IncrementFunctionNumber() { FunctionNumber++; } + + /// EmitConstantPool - Print to the current output stream assembly + /// representations of the constants in the constant pool MCP. This is + /// used to print out constants which have been "spilled to memory" by + /// the code generator. + /// + void EmitConstantPool(MachineConstantPool *MCP); + + /// EmitJumpTableInfo - Print assembly representations of the jump tables + /// used by the current function to the current output stream. + /// + void EmitJumpTableInfo(MachineJumpTableInfo *MJTI, MachineFunction &MF); + + /// EmitSpecialLLVMGlobal - Check to see if the specified global is a + /// special global used by LLVM. If so, emit it and return true, otherwise + /// do nothing and return false. + bool EmitSpecialLLVMGlobal(const GlobalVariable *GV); + + public: + //===------------------------------------------------------------------===// + /// LEB 128 number encoding. + + /// PrintULEB128 - Print a series of hexidecimal values(separated by commas) + /// representing an unsigned leb128 value. + void PrintULEB128(unsigned Value) const; + + /// PrintSLEB128 - Print a series of hexidecimal values(separated by commas) + /// representing a signed leb128 value. + void PrintSLEB128(int Value) const; + + //===------------------------------------------------------------------===// + // Emission and print routines + // + + /// PrintHex - Print a value as a hexidecimal value. + /// + void PrintHex(int Value) const; + + /// EOL - Print a newline character to asm stream. If a comment is present + /// then it will be printed first. Comments should not contain '\n'. + void EOL() const; + void EOL(const std::string &Comment) const; + void EOL(const char* Comment) const; + + /// EmitULEB128Bytes - Emit an assembler byte data directive to compose an + /// unsigned leb128 value. + void EmitULEB128Bytes(unsigned Value) const; + + /// EmitSLEB128Bytes - print an assembler byte data directive to compose a + /// signed leb128 value. + void EmitSLEB128Bytes(int Value) const; + + /// EmitInt8 - Emit a byte directive and value. + /// + void EmitInt8(int Value) const; + + /// EmitInt16 - Emit a short directive and value. 
+ /// + void EmitInt16(int Value) const; + + /// EmitInt32 - Emit a long directive and value. + /// + void EmitInt32(int Value) const; + + /// EmitInt64 - Emit a long long directive and value. + /// + void EmitInt64(uint64_t Value) const; + + /// EmitString - Emit a string with quotes and a null terminator. + /// Special characters are emitted properly. + /// @verbatim (Eg. '\t') @endverbatim + void EmitString(const std::string &String) const; + void EmitString(const char *String, unsigned Size) const; + + /// EmitFile - Emit a .file directive. + void EmitFile(unsigned Number, const std::string &Name) const; + + //===------------------------------------------------------------------===// + + /// EmitAlignment - Emit an alignment directive to the specified power of + /// two boundary. For example, if you pass in 3 here, you will get an 8 + /// byte alignment. If a global value is specified, and if that global has + /// an explicit alignment requested, it will unconditionally override the + /// alignment request. However, if ForcedAlignBits is specified, this value + /// has final say: the ultimate alignment will be the max of ForcedAlignBits + /// and the alignment computed with NumBits and the global. If UseFillExpr + /// is true, it also emits an optional second value FillValue which the + /// assembler uses to fill gaps to match alignment for text sections if the + /// has specified a non-zero fill value. + /// + /// The algorithm is: + /// Align = NumBits; + /// if (GV && GV->hasalignment) Align = GV->getalignment(); + /// Align = std::max(Align, ForcedAlignBits); + /// + void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0, + unsigned ForcedAlignBits = 0, + bool UseFillExpr = true) const; + + /// printLabel - This method prints a local label used by debug and + /// exception handling tables. + void printLabel(const MachineInstr *MI) const; + void printLabel(unsigned Id) const; + + /// printDeclare - This method prints a local variable declaration used by + /// debug tables. + void printDeclare(const MachineInstr *MI) const; + + protected: + /// EmitZeros - Emit a block of zeros. + /// + void EmitZeros(uint64_t NumZeros, unsigned AddrSpace = 0) const; + + /// EmitString - Emit a zero-byte-terminated string constant. + /// + virtual void EmitString(const ConstantArray *CVA) const; + + /// EmitConstantValueOnly - Print out the specified constant, without a + /// storage class. Only constants of first-class type are allowed here. + void EmitConstantValueOnly(const Constant *CV); + + /// EmitGlobalConstant - Print a general LLVM constant to the .s file. + void EmitGlobalConstant(const Constant* CV, unsigned AddrSpace = 0); + + virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV); + + /// processDebugLoc - Processes the debug information of each machine + /// instruction's DebugLoc. + void processDebugLoc(DebugLoc DL); + + /// printInlineAsm - This method formats and prints the specified machine + /// instruction that is an inline asm. + void printInlineAsm(const MachineInstr *MI) const; + + /// printImplicitDef - This method prints the specified machine instruction + /// that is an implicit def. 
+ virtual void printImplicitDef(const MachineInstr *MI) const; + + /// printBasicBlockLabel - This method prints the label for the specified + /// MachineBasicBlock + virtual void printBasicBlockLabel(const MachineBasicBlock *MBB, + bool printAlign = false, + bool printColon = false, + bool printComment = true) const; + + /// printPICJumpTableSetLabel - This method prints a set label for the + /// specified MachineBasicBlock for a jumptable entry. + virtual void printPICJumpTableSetLabel(unsigned uid, + const MachineBasicBlock *MBB) const; + virtual void printPICJumpTableSetLabel(unsigned uid, unsigned uid2, + const MachineBasicBlock *MBB) const; + virtual void printPICJumpTableEntry(const MachineJumpTableInfo *MJTI, + const MachineBasicBlock *MBB, + unsigned uid) const; + + /// printDataDirective - This method prints the asm directive for the + /// specified type. + void printDataDirective(const Type *type, unsigned AddrSpace = 0); + + /// printSuffixedName - This prints a name with preceding + /// getPrivateGlobalPrefix and the specified suffix, handling quoted names + /// correctly. + void printSuffixedName(const char *Name, const char *Suffix, + const char *Prefix = 0); + void printSuffixedName(const std::string &Name, const char* Suffix); + + /// printVisibility - This prints visibility information about symbol, if + /// this is suported by the target. + void printVisibility(const std::string& Name, unsigned Visibility) const; + + /// printOffset - This is just convenient handler for printing offsets. + void printOffset(int64_t Offset) const; + + private: + const GlobalValue *findGlobalValue(const Constant* CV); + void EmitLLVMUsedList(Constant *List); + void EmitXXStructorList(Constant *List); + void EmitGlobalConstantStruct(const ConstantStruct* CVS, + unsigned AddrSpace); + void EmitGlobalConstantArray(const ConstantArray* CVA, unsigned AddrSpace); + void EmitGlobalConstantVector(const ConstantVector* CP); + void EmitGlobalConstantFP(const ConstantFP* CFP, unsigned AddrSpace); + void EmitGlobalConstantLargeInt(const ConstantInt* CI, unsigned AddrSpace); + GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C); + }; +} + +#endif diff --git a/include/llvm/CodeGen/BreakCriticalMachineEdge.h b/include/llvm/CodeGen/BreakCriticalMachineEdge.h new file mode 100644 index 0000000..4861297 --- /dev/null +++ b/include/llvm/CodeGen/BreakCriticalMachineEdge.h @@ -0,0 +1,108 @@ +//===--------- BreakCriticalMachineEdge.h - Break critical edges ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// Helper function to break a critical machine edge. 
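A hedged usage sketch for the helper defined below (not part of the patch; MBB and Succ stand for blocks in some hypothetical MachineFunction pass). Only a genuinely critical edge, where the source has several successors and the destination several predecessors, needs to be split:

    if (MBB->succ_size() > 1 && Succ->pred_size() > 1) {
      // The returned block now sits between MBB and Succ, and the PHI nodes
      // in Succ have been rewritten to list it as their incoming block.
      MachineBasicBlock *Mid = SplitCriticalMachineEdge(MBB, Succ);
      (void)Mid;
    }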
+// +//===---------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_BREAKCRITICALMACHINEEDGE_H +#define LLVM_CODEGEN_BREAKCRITICALMACHINEEDGE_H + +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +MachineBasicBlock* SplitCriticalMachineEdge(MachineBasicBlock* src, + MachineBasicBlock* dst) { + MachineFunction &MF = *src->getParent(); + const BasicBlock* srcBB = src->getBasicBlock(); + + MachineBasicBlock* crit_mbb = MF.CreateMachineBasicBlock(srcBB); + + // modify the llvm control flow graph + src->removeSuccessor(dst); + src->addSuccessor(crit_mbb); + crit_mbb->addSuccessor(dst); + + // insert the new block into the machine function. + MF.push_back(crit_mbb); + + // insert a unconditional branch linking the new block to dst + const TargetMachine& TM = MF.getTarget(); + const TargetInstrInfo* TII = TM.getInstrInfo(); + std::vector<MachineOperand> emptyConditions; + TII->InsertBranch(*crit_mbb, dst, (MachineBasicBlock*)0, + emptyConditions); + + // modify every branch in src that points to dst to point to the new + // machine basic block instead: + MachineBasicBlock::iterator mii = src->end(); + bool found_branch = false; + while (mii != src->begin()) { + mii--; + // if there are no more branches, finish the loop + if (!mii->getDesc().isTerminator()) { + break; + } + + // Scan the operands of this branch, replacing any uses of dst with + // crit_mbb. + for (unsigned i = 0, e = mii->getNumOperands(); i != e; ++i) { + MachineOperand & mo = mii->getOperand(i); + if (mo.isMBB() && mo.getMBB() == dst) { + found_branch = true; + mo.setMBB(crit_mbb); + } + } + } + + // TODO: This is tentative. It may be necessary to fix this code. Maybe + // I am inserting too many gotos, but I am trusting that the asm printer + // will optimize the unnecessary gotos. + if(!found_branch) { + TII->InsertBranch(*src, crit_mbb, (MachineBasicBlock*)0, + emptyConditions); + } + + /// Change all the phi functions in dst, so that the incoming block be + /// crit_mbb, instead of src + for(mii = dst->begin(); mii != dst->end(); mii++) { + /// the first instructions are always phi functions. + if(mii->getOpcode() != TargetInstrInfo::PHI) + break; + + // Find the operands corresponding to the source block + std::vector<unsigned> toRemove; + unsigned reg = 0; + for (unsigned u = 0; u != mii->getNumOperands(); ++u) + if (mii->getOperand(u).isMBB() && + mii->getOperand(u).getMBB() == src) { + reg = mii->getOperand(u-1).getReg(); + toRemove.push_back(u-1); + } + // Remove all uses of this MBB + for (std::vector<unsigned>::reverse_iterator I = toRemove.rbegin(), + E = toRemove.rend(); I != E; ++I) { + mii->RemoveOperand(*I+1); + mii->RemoveOperand(*I); + } + + // Add a single use corresponding to the new MBB + mii->addOperand(MachineOperand::CreateReg(reg, false)); + mii->addOperand(MachineOperand::CreateMBB(crit_mbb)); + } + + return crit_mbb; +} + +} + +#endif diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h new file mode 100644 index 0000000..7c83e24 --- /dev/null +++ b/include/llvm/CodeGen/CallingConvLower.h @@ -0,0 +1,274 @@ +//===-- llvm/CallingConvLower.h - Calling Conventions -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file declares the CCState and CCValAssign classes, used for lowering +// and implementing calling conventions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H +#define LLVM_CODEGEN_CALLINGCONVLOWER_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" + +namespace llvm { + class TargetRegisterInfo; + class TargetMachine; + class CCState; + class SDNode; + +/// CCValAssign - Represent assignment of one arg/retval to a location. +class CCValAssign { +public: + enum LocInfo { + Full, // The value fills the full location. + SExt, // The value is sign extended in the location. + ZExt, // The value is zero extended in the location. + AExt, // The value is extended with undefined upper bits. + BCvt // The value is bit-converted in the location. + // TODO: a subset of the value is in the location. + }; +private: + /// ValNo - This is the value number begin assigned (e.g. an argument number). + unsigned ValNo; + + /// Loc is either a stack offset or a register number. + unsigned Loc; + + /// isMem - True if this is a memory loc, false if it is a register loc. + bool isMem : 1; + + /// isCustom - True if this arg/retval requires special handling. + bool isCustom : 1; + + /// Information about how the value is assigned. + LocInfo HTP : 6; + + /// ValVT - The type of the value being assigned. + MVT ValVT; + + /// LocVT - The type of the location being assigned to. + MVT LocVT; +public: + + static CCValAssign getReg(unsigned ValNo, MVT ValVT, + unsigned RegNo, MVT LocVT, + LocInfo HTP) { + CCValAssign Ret; + Ret.ValNo = ValNo; + Ret.Loc = RegNo; + Ret.isMem = false; + Ret.isCustom = false; + Ret.HTP = HTP; + Ret.ValVT = ValVT; + Ret.LocVT = LocVT; + return Ret; + } + + static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, + unsigned RegNo, MVT LocVT, + LocInfo HTP) { + CCValAssign Ret; + Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP); + Ret.isCustom = true; + return Ret; + } + + static CCValAssign getMem(unsigned ValNo, MVT ValVT, + unsigned Offset, MVT LocVT, + LocInfo HTP) { + CCValAssign Ret; + Ret.ValNo = ValNo; + Ret.Loc = Offset; + Ret.isMem = true; + Ret.isCustom = false; + Ret.HTP = HTP; + Ret.ValVT = ValVT; + Ret.LocVT = LocVT; + return Ret; + } + + static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, + unsigned Offset, MVT LocVT, + LocInfo HTP) { + CCValAssign Ret; + Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP); + Ret.isCustom = true; + return Ret; + } + + unsigned getValNo() const { return ValNo; } + MVT getValVT() const { return ValVT; } + + bool isRegLoc() const { return !isMem; } + bool isMemLoc() const { return isMem; } + + bool needsCustom() const { return isCustom; } + + unsigned getLocReg() const { assert(isRegLoc()); return Loc; } + unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; } + MVT getLocVT() const { return LocVT; } + + LocInfo getLocInfo() const { return HTP; } +}; + +/// CCAssignFn - This function assigns a location for Val, updating State to +/// reflect the change. +typedef bool CCAssignFn(unsigned ValNo, MVT ValVT, + MVT LocVT, CCValAssign::LocInfo LocInfo, + ISD::ArgFlagsTy ArgFlags, CCState &State); + +/// CCCustomFn - This function assigns a location for Val, possibly updating +/// all args to reflect changes and indicates if it handled it. 
It must set +/// isCustom if it handles the arg and returns true. +typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT, + MVT &LocVT, CCValAssign::LocInfo &LocInfo, + ISD::ArgFlagsTy &ArgFlags, CCState &State); + +/// CCState - This class holds information needed while lowering arguments and +/// return values. It captures which registers are already assigned and which +/// stack slots are used. It provides accessors to allocate these values. +class CCState { + unsigned CallingConv; + bool IsVarArg; + const TargetMachine &TM; + const TargetRegisterInfo &TRI; + SmallVector<CCValAssign, 16> &Locs; + + unsigned StackOffset; + SmallVector<uint32_t, 16> UsedRegs; +public: + CCState(unsigned CC, bool isVarArg, const TargetMachine &TM, + SmallVector<CCValAssign, 16> &locs); + + void addLoc(const CCValAssign &V) { + Locs.push_back(V); + } + + const TargetMachine &getTarget() const { return TM; } + unsigned getCallingConv() const { return CallingConv; } + bool isVarArg() const { return IsVarArg; } + + unsigned getNextStackOffset() const { return StackOffset; } + + /// isAllocated - Return true if the specified register (or an alias) is + /// allocated. + bool isAllocated(unsigned Reg) const { + return UsedRegs[Reg/32] & (1 << (Reg&31)); + } + + /// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node, + /// incorporating info about the formals into this state. + void AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn); + + /// AnalyzeReturn - Analyze the returned values of an ISD::RET node, + /// incorporating info about the result values into this state. + void AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn); + + /// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info + /// about the passed values into this state. + void AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn); + + /// AnalyzeCallOperands - Same as above except it takes vectors of types + /// and argument flags. + void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs, + SmallVectorImpl<ISD::ArgFlagsTy> &Flags, + CCAssignFn Fn); + + /// AnalyzeCallResult - Analyze the return values of an ISD::CALL node, + /// incorporating info about the passed values into this state. + void AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn); + + /// AnalyzeCallResult - Same as above except it's specialized for calls which + /// produce a single value. + void AnalyzeCallResult(MVT VT, CCAssignFn Fn); + + /// getFirstUnallocated - Return the first unallocated register in the set, or + /// NumRegs if they are all allocated. + unsigned getFirstUnallocated(const unsigned *Regs, unsigned NumRegs) const { + for (unsigned i = 0; i != NumRegs; ++i) + if (!isAllocated(Regs[i])) + return i; + return NumRegs; + } + + /// AllocateReg - Attempt to allocate one register. If it is not available, + /// return zero. Otherwise, return the register, marking it and any aliases + /// as allocated. + unsigned AllocateReg(unsigned Reg) { + if (isAllocated(Reg)) return 0; + MarkAllocated(Reg); + return Reg; + } + + /// Version of AllocateReg with extra register to be shadowed. + unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) { + if (isAllocated(Reg)) return 0; + MarkAllocated(Reg); + MarkAllocated(ShadowReg); + return Reg; + } + + /// AllocateReg - Attempt to allocate one of the specified registers. If none + /// are available, return zero. Otherwise, return the first one available, + /// marking it and any aliases as allocated. 
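A minimal sketch of a hand-written CCAssignFn built on the CCState helpers above (not part of the patch; the register numbers are hypothetical, and the convention of returning false once a location has been recorded is an assumption borrowed from the TableGen-generated calling-convention functions):

    static bool CC_Sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
      enum { R0 = 1, R1, R2, R3 };                    // hypothetical registers
      static const unsigned ArgRegs[] = { R0, R1, R2, R3 };
      if (unsigned Reg = State.AllocateReg(ArgRegs, 4)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;                                 // location assigned
      }
      unsigned Offset = State.AllocateStack(4, 4);    // 4 bytes, 4-byte aligned
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;                                   // location assigned
    }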
+ unsigned AllocateReg(const unsigned *Regs, unsigned NumRegs) { + unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs); + if (FirstUnalloc == NumRegs) + return 0; // Didn't find the reg. + + // Mark the register and any aliases as allocated. + unsigned Reg = Regs[FirstUnalloc]; + MarkAllocated(Reg); + return Reg; + } + + /// Version of AllocateReg with list of registers to be shadowed. + unsigned AllocateReg(const unsigned *Regs, const unsigned *ShadowRegs, + unsigned NumRegs) { + unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs); + if (FirstUnalloc == NumRegs) + return 0; // Didn't find the reg. + + // Mark the register and any aliases as allocated. + unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc]; + MarkAllocated(Reg); + MarkAllocated(ShadowReg); + return Reg; + } + + /// AllocateStack - Allocate a chunk of stack space with the specified size + /// and alignment. + unsigned AllocateStack(unsigned Size, unsigned Align) { + assert(Align && ((Align-1) & Align) == 0); // Align is power of 2. + StackOffset = ((StackOffset + Align-1) & ~(Align-1)); + unsigned Result = StackOffset; + StackOffset += Size; + return Result; + } + + // HandleByVal - Allocate a stack slot large enough to pass an argument by + // value. The size and alignment information of the argument is encoded in its + // parameter attribute. + void HandleByVal(unsigned ValNo, MVT ValVT, + MVT LocVT, CCValAssign::LocInfo LocInfo, + int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags); + +private: + /// MarkAllocated - Mark a register and all of its aliases as allocated. + void MarkAllocated(unsigned Reg); +}; + + + +} // end namespace llvm + +#endif diff --git a/include/llvm/CodeGen/DAGISelHeader.h b/include/llvm/CodeGen/DAGISelHeader.h new file mode 100644 index 0000000..b2acbc1 --- /dev/null +++ b/include/llvm/CodeGen/DAGISelHeader.h @@ -0,0 +1,136 @@ +//==-llvm/CodeGen/DAGISelHeader.h - Common DAG ISel definitions -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides definitions of the common, target-independent methods and +// data, which is used by SelectionDAG-based instruction selectors. +// +// *** NOTE: This file is #included into the middle of the target +// instruction selector class. These functions are really methods. +// This is a little awkward, but it allows this code to be shared +// by all the targets while still being able to call into +// target-specific code without using a virtual function call. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_DAGISEL_HEADER_H +#define LLVM_CODEGEN_DAGISEL_HEADER_H + +/// ISelPosition - Node iterator marking the current position of +/// instruction selection as it procedes through the topologically-sorted +/// node list. +SelectionDAG::allnodes_iterator ISelPosition; + +/// IsChainCompatible - Returns true if Chain is Op or Chain does +/// not reach Op. 
+static bool IsChainCompatible(SDNode *Chain, SDNode *Op) { + if (Chain->getOpcode() == ISD::EntryToken) + return true; + if (Chain->getOpcode() == ISD::TokenFactor) + return false; + if (Chain->getNumOperands() > 0) { + SDValue C0 = Chain->getOperand(0); + if (C0.getValueType() == MVT::Other) + return C0.getNode() != Op && IsChainCompatible(C0.getNode(), Op); + } + return true; +} + +/// ISelUpdater - helper class to handle updates of the +/// instruciton selection graph. +class VISIBILITY_HIDDEN ISelUpdater : public SelectionDAG::DAGUpdateListener { + SelectionDAG::allnodes_iterator &ISelPosition; +public: + explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp) + : ISelPosition(isp) {} + + /// NodeDeleted - Handle nodes deleted from the graph. If the + /// node being deleted is the current ISelPosition node, update + /// ISelPosition. + /// + virtual void NodeDeleted(SDNode *N, SDNode *E) { + if (ISelPosition == SelectionDAG::allnodes_iterator(N)) + ++ISelPosition; + } + + /// NodeUpdated - Ignore updates for now. + virtual void NodeUpdated(SDNode *N) {} +}; + +/// ReplaceUses - replace all uses of the old node F with the use +/// of the new node T. +void ReplaceUses(SDValue F, SDValue T) DISABLE_INLINE { + ISelUpdater ISU(ISelPosition); + CurDAG->ReplaceAllUsesOfValueWith(F, T, &ISU); +} + +/// ReplaceUses - replace all uses of the old nodes F with the use +/// of the new nodes T. +void ReplaceUses(const SDValue *F, const SDValue *T, + unsigned Num) DISABLE_INLINE { + ISelUpdater ISU(ISelPosition); + CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num, &ISU); +} + +/// ReplaceUses - replace all uses of the old node F with the use +/// of the new node T. +void ReplaceUses(SDNode *F, SDNode *T) DISABLE_INLINE { + ISelUpdater ISU(ISelPosition); + CurDAG->ReplaceAllUsesWith(F, T, &ISU); +} + +/// SelectRoot - Top level entry to DAG instruction selector. +/// Selects instructions starting at the root of the current DAG. +void SelectRoot(SelectionDAG &DAG) { + SelectRootInit(); + + // Create a dummy node (which is not added to allnodes), that adds + // a reference to the root node, preventing it from being deleted, + // and tracking any changes of the root. + HandleSDNode Dummy(CurDAG->getRoot()); + ISelPosition = next(SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode())); + + // The AllNodes list is now topological-sorted. Visit the + // nodes by starting at the end of the list (the root of the + // graph) and preceding back toward the beginning (the entry + // node). + while (ISelPosition != CurDAG->allnodes_begin()) { + SDNode *Node = --ISelPosition; + // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes, + // but there are currently some corner cases that it misses. Also, this + // makes it theoretically possible to disable the DAGCombiner. + if (Node->use_empty()) + continue; +#if 0 + DAG.setSubgraphColor(Node, "red"); +#endif + SDNode *ResNode = Select(SDValue(Node, 0)); + // If node should not be replaced, + // continue with the next one. + if (ResNode == Node) + continue; + // Replace node. + if (ResNode) { +#if 0 + DAG.setSubgraphColor(ResNode, "yellow"); + DAG.setSubgraphColor(ResNode, "black"); +#endif + ReplaceUses(Node, ResNode); + } + // If after the replacement this node is not used any more, + // remove this dead node. + if (Node->use_empty()) { // Don't delete EntryToken, etc. 
+ ISelUpdater ISU(ISelPosition); + CurDAG->RemoveDeadNode(Node, &ISU); + } + } + + CurDAG->setRoot(Dummy.getValue()); +} + +#endif /* LLVM_CODEGEN_DAGISEL_HEADER_H */ diff --git a/include/llvm/CodeGen/DebugLoc.h b/include/llvm/CodeGen/DebugLoc.h new file mode 100644 index 0000000..77e6733 --- /dev/null +++ b/include/llvm/CodeGen/DebugLoc.h @@ -0,0 +1,101 @@ +//===---- llvm/CodeGen/DebugLoc.h - Debug Location Information --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a number of light weight data structures used by the code +// generator to describe and track debug location information. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_DEBUGLOC_H +#define LLVM_CODEGEN_DEBUGLOC_H + +#include "llvm/ADT/DenseMap.h" +#include <vector> + +namespace llvm { + class GlobalVariable; + + /// DebugLocTuple - Debug location tuple of filename id, line and column. + /// + struct DebugLocTuple { + GlobalVariable *CompileUnit; + unsigned Line, Col; + + DebugLocTuple(GlobalVariable *v, unsigned l, unsigned c) + : CompileUnit(v), Line(l), Col(c) {}; + + bool operator==(const DebugLocTuple &DLT) const { + return CompileUnit == DLT.CompileUnit && + Line == DLT.Line && Col == DLT.Col; + } + bool operator!=(const DebugLocTuple &DLT) const { + return !(*this == DLT); + } + }; + + /// DebugLoc - Debug location id. This is carried by SDNode and MachineInstr + /// to index into a vector of unique debug location tuples. + class DebugLoc { + unsigned Idx; + + public: + DebugLoc() : Idx(~0U) {} // Defaults to invalid. + + static DebugLoc getUnknownLoc() { DebugLoc L; L.Idx = ~0U; return L; } + static DebugLoc get(unsigned idx) { DebugLoc L; L.Idx = idx; return L; } + + unsigned getIndex() const { return Idx; } + + /// isUnknown - Return true if there is no debug info for the SDNode / + /// MachineInstr. + bool isUnknown() const { return Idx == ~0U; } + + bool operator==(const DebugLoc &DL) const { return Idx == DL.Idx; } + bool operator!=(const DebugLoc &DL) const { return !(*this == DL); } + }; + + // Partially specialize DenseMapInfo for DebugLocTyple. + template<> struct DenseMapInfo<DebugLocTuple> { + static inline DebugLocTuple getEmptyKey() { + return DebugLocTuple(0, ~0U, ~0U); + } + static inline DebugLocTuple getTombstoneKey() { + return DebugLocTuple((GlobalVariable*)~1U, ~1U, ~1U); + } + static unsigned getHashValue(const DebugLocTuple &Val) { + return DenseMapInfo<GlobalVariable*>::getHashValue(Val.CompileUnit) ^ + DenseMapInfo<unsigned>::getHashValue(Val.Line) ^ + DenseMapInfo<unsigned>::getHashValue(Val.Col); + } + static bool isEqual(const DebugLocTuple &LHS, const DebugLocTuple &RHS) { + return LHS.CompileUnit == RHS.CompileUnit && + LHS.Line == RHS.Line && + LHS.Col == RHS.Col; + } + + static bool isPod() { return true; } + }; + + /// DebugLocTracker - This class tracks debug location information. + /// + struct DebugLocTracker { + /// DebugLocations - A vector of unique DebugLocTuple's. + /// + std::vector<DebugLocTuple> DebugLocations; + + /// DebugIdMap - This maps DebugLocTuple's to indices into the + /// DebugLocations vector. 
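The vector and map in DebugLocTracker below act as a small interning table: DebugIdMap answers whether a tuple has been seen before, and DebugLocations gives each unique tuple the index that a DebugLoc carries. A sketch of how a client might intern a location (not part of the patch; the helper name is hypothetical):

    static DebugLoc DebugLocGetOrCreate(DebugLocTracker &Tracker,
                                        GlobalVariable *CU,
                                        unsigned Line, unsigned Col) {
      DebugLocTuple Tuple(CU, Line, Col);
      DenseMap<DebugLocTuple, unsigned>::iterator I =
          Tracker.DebugIdMap.find(Tuple);
      if (I != Tracker.DebugIdMap.end())
        return DebugLoc::get(I->second);          // already interned
      unsigned Id = Tracker.DebugLocations.size(); // next unique index
      Tracker.DebugLocations.push_back(Tuple);
      Tracker.DebugIdMap[Tuple] = Id;
      return DebugLoc::get(Id);
    }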
+    DenseMap<DebugLocTuple, unsigned> DebugIdMap;
+
+    DebugLocTracker() {}
+  };
+
+} // end namespace llvm
+
+#endif /* LLVM_CODEGEN_DEBUGLOC_H */
diff --git a/include/llvm/CodeGen/DwarfWriter.h b/include/llvm/CodeGen/DwarfWriter.h
new file mode 100644
index 0000000..facd5f6
--- /dev/null
+++ b/include/llvm/CodeGen/DwarfWriter.h
@@ -0,0 +1,123 @@
+//===-- llvm/CodeGen/DwarfWriter.h - Dwarf Framework ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing Dwarf debug and exception info into
+// asm files. For details on the DWARF 3 specification, see the DWARF Debugging
+// Information Format V.3 reference manual at http://dwarf.freestandards.org.
+//
+// The role of the Dwarf Writer class is to extract information from the
+// MachineModuleInfo object, organize it in Dwarf form, and then emit it into
+// the current asm file using data and high-level Dwarf directives.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFWRITER_H
+#define LLVM_CODEGEN_DWARFWRITER_H
+
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class DwarfDebug;
+class DwarfException;
+class MachineModuleInfo;
+class MachineFunction;
+class MachineInstr;
+class Value;
+class Module;
+class GlobalVariable;
+class TargetAsmInfo;
+class raw_ostream;
+class Instruction;
+class DICompileUnit;
+class DISubprogram;
+class DIVariable;
+
+//===----------------------------------------------------------------------===//
+// DwarfWriter - Emits Dwarf debug and exception handling directives.
+//
+
+class DwarfWriter : public ImmutablePass {
+private:
+  /// DD - Provides the DwarfWriter debug implementation.
+  ///
+  DwarfDebug *DD;
+
+  /// DE - Provides the DwarfWriter exception implementation.
+  ///
+  DwarfException *DE;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  DwarfWriter();
+  virtual ~DwarfWriter();
+
+  //===--------------------------------------------------------------------===//
+  // Main entry points.
+  //
+
+  /// BeginModule - Emit all Dwarf sections that should come prior to the
+  /// content.
+  void BeginModule(Module *M, MachineModuleInfo *MMI, raw_ostream &OS,
+                   AsmPrinter *A, const TargetAsmInfo *T);
+
+  /// EndModule - Emit all Dwarf sections that should come after the content.
+  ///
+  void EndModule();
+
+  /// BeginFunction - Gather pre-function debug information. Assumes being
+  /// emitted immediately after the function entry point.
+  void BeginFunction(MachineFunction *MF);
+
+  /// EndFunction - Gather and emit post-function debug information.
+  ///
+  void EndFunction(MachineFunction *MF);
+
+  /// RecordSourceLine - Register a source line with debug info. Returns a
+  /// unique label ID used to generate a label and provide correspondence to
+  /// the source line list.
+  unsigned RecordSourceLine(unsigned Line, unsigned Col, DICompileUnit CU);
+
+  /// RecordRegionStart - Indicate the start of a region.
+  unsigned RecordRegionStart(GlobalVariable *V);
+
+  /// RecordRegionEnd - Indicate the end of a region.
+  unsigned RecordRegionEnd(GlobalVariable *V);
+
+  /// getRecordSourceLineCount - Count source lines.
+ unsigned getRecordSourceLineCount(); + + /// RecordVariable - Indicate the declaration of a local variable. + /// + void RecordVariable(GlobalVariable *GV, unsigned FrameIndex, + const MachineInstr *MI); + + /// ShouldEmitDwarfDebug - Returns true if Dwarf debugging declarations should + /// be emitted. + bool ShouldEmitDwarfDebug() const; + + //// RecordInlinedFnStart - Indicate the start of a inlined function. + unsigned RecordInlinedFnStart(DISubprogram SP, DICompileUnit CU, + unsigned Line, unsigned Col); + + /// RecordInlinedFnEnd - Indicate the end of inlined subroutine. + unsigned RecordInlinedFnEnd(DISubprogram SP); + + /// RecordVariableScope - Record scope for the variable declared by + /// DeclareMI. DeclareMI must describe TargetInstrInfo::DECLARE. + void RecordVariableScope(DIVariable &DV, const MachineInstr *DeclareMI); +}; + + +} // end llvm namespace + +#endif diff --git a/include/llvm/CodeGen/ELFRelocation.h b/include/llvm/CodeGen/ELFRelocation.h new file mode 100644 index 0000000..c3f88f1 --- /dev/null +++ b/include/llvm/CodeGen/ELFRelocation.h @@ -0,0 +1,51 @@ +//=== ELFRelocation.h - ELF Relocation Info ---------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ELFRelocation class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_ELF_RELOCATION_H +#define LLVM_CODEGEN_ELF_RELOCATION_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { + + /// ELFRelocation - This class contains all the information necessary to + /// to generate any 32-bit or 64-bit ELF relocation entry. + class ELFRelocation { + uint64_t r_offset; // offset in the section of the object this applies to + uint32_t r_symidx; // symbol table index of the symbol to use + uint32_t r_type; // machine specific relocation type + int64_t r_add; // explicit relocation addend + bool r_rela; // if true then the addend is part of the entry + // otherwise the addend is at the location specified + // by r_offset + public: + + uint64_t getInfo(bool is64Bit = false) const { + if (is64Bit) + return ((uint64_t)r_symidx << 32) + ((uint64_t)r_type & 0xFFFFFFFFL); + else + return (r_symidx << 8) + (r_type & 0xFFL); + } + + uint64_t getOffset() const { return r_offset; } + uint64_t getAddress() const { return r_add; } + + ELFRelocation(uint64_t off, uint32_t sym, uint32_t type, + bool rela = true, int64_t addend = 0) : + r_offset(off), r_symidx(sym), r_type(type), + r_add(addend), r_rela(rela) {} + }; + +} // end llvm namespace + +#endif // LLVM_CODEGEN_ELF_RELOCATION_H diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h new file mode 100644 index 0000000..38c1710 --- /dev/null +++ b/include/llvm/CodeGen/FastISel.h @@ -0,0 +1,315 @@ +//===-- FastISel.h - Definition of the FastISel class ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the FastISel class. 
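Referring back to ELFRelocation::getInfo above, a worked instance of the packing it performs (illustrative field values only, not part of the patch):

    // With r_symidx == 5 and r_type == 2:
    //   getInfo(true)  == (uint64_t(5) << 32) | 2 == 0x0000000500000002
    //   getInfo(false) == (5 << 8) + 2            == 0x502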
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_FASTISEL_H +#define LLVM_CODEGEN_FASTISEL_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/CodeGen/DebugLoc.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" + +namespace llvm { + +class AllocaInst; +class ConstantFP; +class Instruction; +class MachineBasicBlock; +class MachineConstantPool; +class MachineFunction; +class MachineFrameInfo; +class MachineModuleInfo; +class DwarfWriter; +class MachineRegisterInfo; +class TargetData; +class TargetInstrInfo; +class TargetLowering; +class TargetMachine; +class TargetRegisterClass; + +/// FastISel - This is a fast-path instruction selection class that +/// generates poor code and doesn't support illegal types or non-trivial +/// lowering, but runs quickly. +class FastISel { +protected: + MachineBasicBlock *MBB; + DenseMap<const Value *, unsigned> LocalValueMap; + DenseMap<const Value *, unsigned> &ValueMap; + DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap; + DenseMap<const AllocaInst *, int> &StaticAllocaMap; +#ifndef NDEBUG + SmallSet<Instruction*, 8> &CatchInfoLost; +#endif + MachineFunction &MF; + MachineModuleInfo *MMI; + DwarfWriter *DW; + MachineRegisterInfo &MRI; + MachineFrameInfo &MFI; + MachineConstantPool &MCP; + DebugLoc DL; + const TargetMachine &TM; + const TargetData &TD; + const TargetInstrInfo &TII; + const TargetLowering &TLI; + +public: + /// startNewBlock - Set the current block to which generated machine + /// instructions will be appended, and clear the local CSE map. + /// + void startNewBlock(MachineBasicBlock *mbb) { + setCurrentBlock(mbb); + LocalValueMap.clear(); + } + + /// setCurrentBlock - Set the current block to which generated machine + /// instructions will be appended. + /// + void setCurrentBlock(MachineBasicBlock *mbb) { + MBB = mbb; + } + + /// setCurDebugLoc - Set the current debug location information, which is used + /// when creating a machine instruction. + /// + void setCurDebugLoc(DebugLoc dl) { DL = dl; } + + /// getCurDebugLoc() - Return current debug location information. + DebugLoc getCurDebugLoc() const { return DL; } + + /// SelectInstruction - Do "fast" instruction selection for the given + /// LLVM IR instruction, and append generated machine instructions to + /// the current block. Return true if selection was successful. + /// + bool SelectInstruction(Instruction *I); + + /// SelectInstruction - Do "fast" instruction selection for the given + /// LLVM IR operator (Instruction or ConstantExpr), and append + /// generated machine instructions to the current block. Return true + /// if selection was successful. + /// + bool SelectOperator(User *I, unsigned Opcode); + + /// TargetSelectInstruction - This method is called by target-independent + /// code when the normal FastISel process fails to select an instruction. + /// This gives targets a chance to emit code for anything that doesn't + /// fit into FastISel's framework. It returns true if it was successful. + /// + virtual bool + TargetSelectInstruction(Instruction *I) = 0; + + /// getRegForValue - Create a virtual register and arrange for it to + /// be assigned the value for the given LLVM value. + unsigned getRegForValue(Value *V); + + /// lookUpRegForValue - Look up the value to see if its value is already + /// cached in a register. It may be defined by instructions across blocks or + /// defined locally. 
+ unsigned lookUpRegForValue(Value *V); + + /// getRegForGEPIndex - This is a wrapper around getRegForValue that also + /// takes care of truncating or sign-extending the given getelementptr + /// index value. + unsigned getRegForGEPIndex(Value *V); + + virtual ~FastISel(); + +protected: + FastISel(MachineFunction &mf, + MachineModuleInfo *mmi, + DwarfWriter *dw, + DenseMap<const Value *, unsigned> &vm, + DenseMap<const BasicBlock *, MachineBasicBlock *> &bm, + DenseMap<const AllocaInst *, int> &am +#ifndef NDEBUG + , SmallSet<Instruction*, 8> &cil +#endif + ); + + /// FastEmit_r - This method is called by target-independent code + /// to request that an instruction with the given type and opcode + /// be emitted. + virtual unsigned FastEmit_(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode); + + /// FastEmit_r - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// register operand be emitted. + /// + virtual unsigned FastEmit_r(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, unsigned Op0); + + /// FastEmit_rr - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// register operands be emitted. + /// + virtual unsigned FastEmit_rr(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, + unsigned Op0, unsigned Op1); + + /// FastEmit_ri - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// register and immediate operands be emitted. + /// + virtual unsigned FastEmit_ri(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, + unsigned Op0, uint64_t Imm); + + /// FastEmit_rf - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// register and floating-point immediate operands be emitted. + /// + virtual unsigned FastEmit_rf(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, + unsigned Op0, ConstantFP *FPImm); + + /// FastEmit_rri - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// register and immediate operands be emitted. + /// + virtual unsigned FastEmit_rri(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, + unsigned Op0, unsigned Op1, uint64_t Imm); + + /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries + /// to emit an instruction with an immediate operand using FastEmit_ri. + /// If that fails, it materializes the immediate into a register and try + /// FastEmit_rr instead. + unsigned FastEmit_ri_(MVT::SimpleValueType VT, + ISD::NodeType Opcode, + unsigned Op0, uint64_t Imm, + MVT::SimpleValueType ImmType); + + /// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries + /// to emit an instruction with an immediate operand using FastEmit_rf. + /// If that fails, it materializes the immediate into a register and try + /// FastEmit_rr instead. + unsigned FastEmit_rf_(MVT::SimpleValueType VT, + ISD::NodeType Opcode, + unsigned Op0, ConstantFP *FPImm, + MVT::SimpleValueType ImmType); + + /// FastEmit_i - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// immediate operand be emitted. 
+ virtual unsigned FastEmit_i(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, + uint64_t Imm); + + /// FastEmit_f - This method is called by target-independent code + /// to request that an instruction with the given type, opcode, and + /// floating-point immediate operand be emitted. + virtual unsigned FastEmit_f(MVT::SimpleValueType VT, + MVT::SimpleValueType RetVT, + ISD::NodeType Opcode, + ConstantFP *FPImm); + + /// FastEmitInst_ - Emit a MachineInstr with no operands and a + /// result register in the given register class. + /// + unsigned FastEmitInst_(unsigned MachineInstOpcode, + const TargetRegisterClass *RC); + + /// FastEmitInst_r - Emit a MachineInstr with one register operand + /// and a result register in the given register class. + /// + unsigned FastEmitInst_r(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0); + + /// FastEmitInst_rr - Emit a MachineInstr with two register operands + /// and a result register in the given register class. + /// + unsigned FastEmitInst_rr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, unsigned Op1); + + /// FastEmitInst_ri - Emit a MachineInstr with two register operands + /// and a result register in the given register class. + /// + unsigned FastEmitInst_ri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, uint64_t Imm); + + /// FastEmitInst_rf - Emit a MachineInstr with two register operands + /// and a result register in the given register class. + /// + unsigned FastEmitInst_rf(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, ConstantFP *FPImm); + + /// FastEmitInst_rri - Emit a MachineInstr with two register operands, + /// an immediate, and a result register in the given register class. + /// + unsigned FastEmitInst_rri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, unsigned Op1, uint64_t Imm); + + /// FastEmitInst_i - Emit a MachineInstr with a single immediate + /// operand, and a result register in the given register class. + unsigned FastEmitInst_i(unsigned MachineInstrOpcode, + const TargetRegisterClass *RC, + uint64_t Imm); + + /// FastEmitInst_extractsubreg - Emit a MachineInstr for an extract_subreg + /// from a specified index of a superregister to a specified type. + unsigned FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT, + unsigned Op0, uint32_t Idx); + + /// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op + /// with all but the least significant bit set to zero. + unsigned FastEmitZExtFromI1(MVT::SimpleValueType VT, + unsigned Op); + + /// FastEmitBranch - Emit an unconditional branch to the given block, + /// unless it is the immediate (fall-through) successor, and update + /// the CFG. + void FastEmitBranch(MachineBasicBlock *MBB); + + unsigned UpdateValueMap(Value* I, unsigned Reg); + + unsigned createResultReg(const TargetRegisterClass *RC); + + /// TargetMaterializeConstant - Emit a constant in a register using + /// target-specific logic, such as constant pool loads. + virtual unsigned TargetMaterializeConstant(Constant* C) { + return 0; + } + + /// TargetMaterializeAlloca - Emit an alloca address in a register using + /// target-specific logic. 
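A sketch of how a target might implement the TargetSelectInstruction hook using the FastEmitInst_* helpers above (not part of the patch; FooFastISel, Foo::ADDri, and GR32RC, a const TargetRegisterClass* for the target's 32-bit GPR class, are hypothetical):

    bool FooFastISel::TargetSelectInstruction(Instruction *I) {
      BinaryOperator *BO = dyn_cast<BinaryOperator>(I);
      if (!BO || BO->getOpcode() != Instruction::Add)
        return false;                          // let the generic path handle it
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1));
      if (!CI)
        return false;
      unsigned Op0 = getRegForValue(BO->getOperand(0));
      if (Op0 == 0)
        return false;                          // operand not materialized yet
      unsigned ResultReg = FastEmitInst_ri(Foo::ADDri, GR32RC,
                                           Op0, CI->getZExtValue());
      UpdateValueMap(I, ResultReg);
      return true;                             // instruction handled here
    }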
+ virtual unsigned TargetMaterializeAlloca(AllocaInst* C) { + return 0; + } + +private: + bool SelectBinaryOp(User *I, ISD::NodeType ISDOpcode); + + bool SelectGetElementPtr(User *I); + + bool SelectCall(User *I); + + bool SelectBitCast(User *I); + + bool SelectCast(User *I, ISD::NodeType Opcode); +}; + +} + +#endif diff --git a/include/llvm/CodeGen/FileWriters.h b/include/llvm/CodeGen/FileWriters.h new file mode 100644 index 0000000..b3781e0 --- /dev/null +++ b/include/llvm/CodeGen/FileWriters.h @@ -0,0 +1,31 @@ +//===-- FileWriters.h - File Writers Creation Functions ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Functions to add the various file writer passes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_FILEWRITERS_H +#define LLVM_CODEGEN_FILEWRITERS_H + +namespace llvm { + + class PassManagerBase; + class MachineCodeEmitter; + class TargetMachine; + class raw_ostream; + + MachineCodeEmitter *AddELFWriter(PassManagerBase &FPM, raw_ostream &O, + TargetMachine &TM); + MachineCodeEmitter *AddMachOWriter(PassManagerBase &FPM, raw_ostream &O, + TargetMachine &TM); + +} // end llvm namespace + +#endif // LLVM_CODEGEN_FILEWRITERS_H diff --git a/include/llvm/CodeGen/GCMetadata.h b/include/llvm/CodeGen/GCMetadata.h new file mode 100644 index 0000000..e94aba3 --- /dev/null +++ b/include/llvm/CodeGen/GCMetadata.h @@ -0,0 +1,192 @@ +//===-- GCMetadata.h - Garbage collector metadata -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the GCFunctionInfo and GCModuleInfo classes, which are +// used as a communication channel from the target code generator to the target +// garbage collectors. This interface allows code generators and garbage +// collectors to be developed independently. +// +// The GCFunctionInfo class logs the data necessary to build a type accurate +// stack map. The code generator outputs: +// +// - Safe points as specified by the GCStrategy's NeededSafePoints. +// - Stack offsets for GC roots, as specified by calls to llvm.gcroot +// +// As a refinement, liveness analysis calculates the set of live roots at each +// safe point. Liveness analysis is not presently performed by the code +// generator, so all roots are assumed live. +// +// GCModuleInfo simply collects GCFunctionInfo instances for each Function as +// they are compiled. This accretion is necessary for collectors which must emit +// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo +// outlives the MachineFunction from which it is derived and must not refer to +// any code generator data structures. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_GCMETADATA_H +#define LLVM_CODEGEN_GCMETADATA_H + +#include "llvm/Pass.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/StringMap.h" + +namespace llvm { + + class AsmPrinter; + class GCStrategy; + class Constant; + class TargetAsmInfo; + + + namespace GC { + /// PointKind - The type of a collector-safe point. 
+ /// + enum PointKind { + Loop, //< Instr is a loop (backwards branch). + Return, //< Instr is a return instruction. + PreCall, //< Instr is a call instruction. + PostCall //< Instr is the return address of a call. + }; + } + + /// GCPoint - Metadata for a collector-safe point in machine code. + /// + struct GCPoint { + GC::PointKind Kind; //< The kind of the safe point. + unsigned Num; //< Usually a label. + + GCPoint(GC::PointKind K, unsigned N) : Kind(K), Num(N) {} + }; + + /// GCRoot - Metadata for a pointer to an object managed by the garbage + /// collector. + struct GCRoot { + int Num; //< Usually a frame index. + int StackOffset; //< Offset from the stack pointer. + Constant *Metadata; //< Metadata straight from the call to llvm.gcroot. + + GCRoot(int N, Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {} + }; + + + /// GCFunctionInfo - Garbage collection metadata for a single function. + /// + class GCFunctionInfo { + public: + typedef std::vector<GCPoint>::iterator iterator; + typedef std::vector<GCRoot>::iterator roots_iterator; + typedef std::vector<GCRoot>::const_iterator live_iterator; + + private: + const Function &F; + GCStrategy &S; + uint64_t FrameSize; + std::vector<GCRoot> Roots; + std::vector<GCPoint> SafePoints; + + // FIXME: Liveness. A 2D BitVector, perhaps? + // + // BitVector Liveness; + // + // bool islive(int point, int root) = + // Liveness[point * SafePoints.size() + root] + // + // The bit vector is the more compact representation where >3.2% of roots + // are live per safe point (1.5% on 64-bit hosts). + + public: + GCFunctionInfo(const Function &F, GCStrategy &S); + ~GCFunctionInfo(); + + /// getFunction - Return the function to which this metadata applies. + /// + const Function &getFunction() const { return F; } + + /// getStrategy - Return the GC strategy for the function. + /// + GCStrategy &getStrategy() { return S; } + + /// addStackRoot - Registers a root that lives on the stack. Num is the + /// stack object ID for the alloca (if the code generator is + // using MachineFrameInfo). + void addStackRoot(int Num, Constant *Metadata) { + Roots.push_back(GCRoot(Num, Metadata)); + } + + /// addSafePoint - Notes the existence of a safe point. Num is the ID of the + /// label just prior to the safe point (if the code generator is using + /// MachineModuleInfo). + void addSafePoint(GC::PointKind Kind, unsigned Num) { + SafePoints.push_back(GCPoint(Kind, Num)); + } + + /// getFrameSize/setFrameSize - Records the function's frame size. + /// + uint64_t getFrameSize() const { return FrameSize; } + void setFrameSize(uint64_t S) { FrameSize = S; } + + /// begin/end - Iterators for safe points. + /// + iterator begin() { return SafePoints.begin(); } + iterator end() { return SafePoints.end(); } + size_t size() const { return SafePoints.size(); } + + /// roots_begin/roots_end - Iterators for all roots in the function. + /// + roots_iterator roots_begin() { return Roots.begin(); } + roots_iterator roots_end () { return Roots.end(); } + size_t roots_size() const { return Roots.size(); } + + /// live_begin/live_end - Iterators for live roots at a given safe point. + /// + live_iterator live_begin(const iterator &p) { return roots_begin(); } + live_iterator live_end (const iterator &p) { return roots_end(); } + size_t live_size(const iterator &p) const { return roots_size(); } + }; + + + /// GCModuleInfo - Garbage collection metadata for a whole module. 
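A short sketch of how a code generator records metadata through GCFunctionInfo (not part of the patch; GMI is the GCModuleInfo pass declared below, and F, LabelID, FrameIdx, and RootMetadata are assumed to be in scope):

    GCFunctionInfo &FI = GMI.getFunctionInfo(F);
    FI.addSafePoint(GC::PostCall, LabelID);    // label emitted right after a call
    FI.addStackRoot(FrameIdx, RootMetadata);   // frame index of a gcroot alloca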
+ /// + class GCModuleInfo : public ImmutablePass { + typedef StringMap<GCStrategy*> strategy_map_type; + typedef std::vector<GCStrategy*> list_type; + typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type; + + strategy_map_type StrategyMap; + list_type StrategyList; + finfo_map_type FInfoMap; + + GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name); + + public: + typedef list_type::const_iterator iterator; + + static char ID; + + GCModuleInfo(); + ~GCModuleInfo(); + + /// clear - Resets the pass. The metadata deleter pass calls this. + /// + void clear(); + + /// begin/end - Iterators for used strategies. + /// + iterator begin() const { return StrategyList.begin(); } + iterator end() const { return StrategyList.end(); } + + /// get - Look up function metadata. + /// + GCFunctionInfo &getFunctionInfo(const Function &F); + }; + +} + +#endif diff --git a/include/llvm/CodeGen/GCMetadataPrinter.h b/include/llvm/CodeGen/GCMetadataPrinter.h new file mode 100644 index 0000000..b693b1b --- /dev/null +++ b/include/llvm/CodeGen/GCMetadataPrinter.h @@ -0,0 +1,76 @@ +//===-- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The abstract base class GCMetadataPrinter supports writing GC metadata tables +// as assembly code. This is a separate class from GCStrategy in order to allow +// users of the LLVM JIT to avoid linking with the AsmWriter. +// +// Subclasses of GCMetadataPrinter must be registered using the +// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself +// because these subclasses are logically plugins for the AsmWriter. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H +#define LLVM_CODEGEN_GCMETADATAPRINTER_H + +#include "llvm/CodeGen/GCMetadata.h" +#include "llvm/CodeGen/GCStrategy.h" +#include "llvm/Support/Registry.h" + +namespace llvm { + + class GCMetadataPrinter; + class raw_ostream; + + /// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the + /// defaults from Registry. + typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry; + + /// GCMetadataPrinter - Emits GC metadata as assembly code. + /// + class GCMetadataPrinter { + public: + typedef GCStrategy::list_type list_type; + typedef GCStrategy::iterator iterator; + + private: + GCStrategy *S; + + friend class AsmPrinter; + + protected: + // May only be subclassed. + GCMetadataPrinter(); + + // Do not implement. + GCMetadataPrinter(const GCMetadataPrinter &); + GCMetadataPrinter &operator=(const GCMetadataPrinter &); + + public: + GCStrategy &getStrategy() { return *S; } + const Module &getModule() const { return S->getModule(); } + + /// begin/end - Iterate over the collected function metadata. + iterator begin() { return S->begin(); } + iterator end() { return S->end(); } + + /// beginAssembly/finishAssembly - Emit module metadata as assembly code. 
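A minimal sketch of a printer plugin using the registry typedef above (not part of the patch; MyGCPrinter and the "mygc" name are hypothetical, and the Registry<>::Add registration idiom is an assumption mirroring the in-tree ocaml printer):

    class MyGCPrinter : public GCMetadataPrinter {
    public:
      virtual void beginAssembly(raw_ostream &OS, AsmPrinter &AP,
                                 const TargetAsmInfo &TAI);
      virtual void finishAssembly(raw_ostream &OS, AsmPrinter &AP,
                                  const TargetAsmInfo &TAI);
    };

    static GCMetadataPrinterRegistry::Add<MyGCPrinter>
    X("mygc", "editor's example printer");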
+    virtual void beginAssembly(raw_ostream &OS, AsmPrinter &AP,
+                               const TargetAsmInfo &TAI);
+
+    virtual void finishAssembly(raw_ostream &OS, AsmPrinter &AP,
+                                const TargetAsmInfo &TAI);
+
+    virtual ~GCMetadataPrinter();
+  };
+
+}
+
+#endif
diff --git a/include/llvm/CodeGen/GCStrategy.h b/include/llvm/CodeGen/GCStrategy.h
new file mode 100644
index 0000000..cd760db
--- /dev/null
+++ b/include/llvm/CodeGen/GCStrategy.h
@@ -0,0 +1,142 @@
+//===-- llvm/CodeGen/GCStrategy.h - Garbage collection ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCStrategy coordinates code generation algorithms and implements some itself
+// in order to generate code compatible with a target code generator as
+// specified in a function's 'gc' attribute. Algorithms are enabled by setting
+// flags in a subclass's constructor, and some virtual methods can be
+// overridden.
+//
+// When requested, the GCStrategy will be populated with data about each
+// function which uses it. Specifically:
+//
+// - Safe points
+//   Garbage collection is generally only possible at certain points in code.
+//   GCStrategy can request that the collector insert such points:
+//
+//     - At and after any call to a subroutine
+//     - Before returning from the current function
+//     - Before backwards branches (loops)
+//
+// - Roots
+//   When a reference to a GC-allocated object exists on the stack, it must be
+//   stored in an alloca registered with llvm.gcroot.
+//
+// This information can be used to emit the metadata tables which are required
+// by the target garbage collector runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCSTRATEGY_H
+#define LLVM_CODEGEN_GCSTRATEGY_H
+
+#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/Support/Registry.h"
+#include <string>
+
+namespace llvm {
+
+  class GCStrategy;
+
+  /// The GC strategy registry uses all the defaults from Registry.
+  ///
+  typedef Registry<GCStrategy> GCRegistry;
+
+  /// GCStrategy describes a garbage collector algorithm's code generation
+  /// requirements, and provides overridable hooks for those needs which cannot
+  /// be abstractly described.
+  class GCStrategy {
+  public:
+    typedef std::vector<GCFunctionInfo*> list_type;
+    typedef list_type::iterator iterator;
+
+  private:
+    friend class GCModuleInfo;
+    const Module *M;
+    std::string Name;
+
+    list_type Functions;
+
+  protected:
+    unsigned NeededSafePoints;  //< Bitmask of required safe points.
+    bool CustomReadBarriers;    //< Default is to insert loads.
+    bool CustomWriteBarriers;   //< Default is to insert stores.
+    bool CustomRoots;           //< Default is to pass through to backend.
+    bool InitRoots;             //< If set, roots are nulled during lowering.
+    bool UsesMetadata;          //< If set, backend must emit metadata tables.
+
+  public:
+    GCStrategy();
+
+    virtual ~GCStrategy();
+
+
+    /// getName - The name of the GC strategy, for debugging.
+    ///
+    const std::string &getName() const { return Name; }
+
+    /// getModule - The module within which the GC strategy is operating.
+    ///
+    const Module &getModule() const { return *M; }
+
+    /// needsSafePoints - True if safe points of any kind are required. By
+    /// default, none are recorded.
+ bool needsSafePoints() const { return NeededSafePoints != 0; } + + /// needsSafePoint(Kind) - True if the given kind of safe point is + // required. By default, none are recorded. + bool needsSafePoint(GC::PointKind Kind) const { + return (NeededSafePoints & 1 << Kind) != 0; + } + + /// customWriteBarrier - By default, write barriers are replaced with simple + /// store instructions. If true, then + /// performCustomLowering must instead lower them. + bool customWriteBarrier() const { return CustomWriteBarriers; } + + /// customReadBarrier - By default, read barriers are replaced with simple + /// load instructions. If true, then + /// performCustomLowering must instead lower them. + bool customReadBarrier() const { return CustomReadBarriers; } + + /// customRoots - By default, roots are left for the code generator so it + /// can generate a stack map. If true, then + // performCustomLowering must delete them. + bool customRoots() const { return CustomRoots; } + + /// initializeRoots - If set, gcroot intrinsics should initialize their + // allocas to null before the first use. This is + // necessary for most GCs and is enabled by default. + bool initializeRoots() const { return InitRoots; } + + /// usesMetadata - If set, appropriate metadata tables must be emitted by + /// the back-end (assembler, JIT, or otherwise). + bool usesMetadata() const { return UsesMetadata; } + + /// begin/end - Iterators for function metadata. + /// + iterator begin() { return Functions.begin(); } + iterator end() { return Functions.end(); } + + /// insertFunctionMetadata - Creates metadata for a function. + /// + GCFunctionInfo *insertFunctionInfo(const Function &F); + + /// initializeCustomLowering/performCustomLowering - If any of the actions + /// are set to custom, performCustomLowering must be overriden to transform + /// the corresponding actions to LLVM IR. initializeCustomLowering is + /// optional to override. These are the only GCStrategy methods through + /// which the LLVM IR can be modified. + virtual bool initializeCustomLowering(Module &F); + virtual bool performCustomLowering(Function &F); + }; + +} + +#endif diff --git a/include/llvm/CodeGen/GCs.h b/include/llvm/CodeGen/GCs.h new file mode 100644 index 0000000..c407b61 --- /dev/null +++ b/include/llvm/CodeGen/GCs.h @@ -0,0 +1,35 @@ +//===-- GCs.h - Garbage collector linkage hacks ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains hack functions to force linking in the GC components. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_GCS_H +#define LLVM_CODEGEN_GCS_H + +namespace llvm { + class GCStrategy; + class GCMetadataPrinter; + + /// FIXME: Collector instances are not useful on their own. These no longer + /// serve any purpose except to link in the plugins. + + /// Creates an ocaml-compatible garbage collector. + void linkOcamlGC(); + + /// Creates an ocaml-compatible metadata printer. + void linkOcamlGCPrinter(); + + /// Creates a shadow stack garbage collector. This collector requires no code + /// generator support. 
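For reference, a collector built on the GCStrategy interface above usually amounts to a constructor that sets the protected flags plus a registry entry; functions then opt in through their 'gc' attribute. A hedged sketch with hypothetical names, assuming the GC::PointKind enumeration from GCMetadata.h:

#include "llvm/CodeGen/GCStrategy.h"
using namespace llvm;

namespace {
  // Hypothetical collector: flags are set in the constructor, everything else
  // uses the default lowering.
  class MyGC : public GCStrategy {
  public:
    MyGC() {
      NeededSafePoints = 1 << GC::PostCall;  // record a safe point after calls
      UsesMetadata = true;                   // ask the backend for tables
      InitRoots = true;                      // null gcroot allocas on entry
    }
  };

  GCRegistry::Add<MyGC> X("mygc", "example collector");
}

// Functions select the strategy by name in the IR:
//   define void @f() gc "mygc" { ... }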
+ void linkShadowStackGC(); +} + +#endif diff --git a/include/llvm/CodeGen/IntrinsicLowering.h b/include/llvm/CodeGen/IntrinsicLowering.h new file mode 100644 index 0000000..6628329 --- /dev/null +++ b/include/llvm/CodeGen/IntrinsicLowering.h @@ -0,0 +1,50 @@ +//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the IntrinsicLowering interface. This interface allows +// addition of domain-specific or front-end specific intrinsics to LLVM without +// having to modify all of the C backend or interpreter. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H +#define LLVM_CODEGEN_INTRINSICLOWERING_H + +#include "llvm/Intrinsics.h" + +namespace llvm { + class CallInst; + class Module; + class TargetData; + + class IntrinsicLowering { + const TargetData& TD; + public: + explicit IntrinsicLowering(const TargetData &td) : TD(td) {} + + /// AddPrototypes - This method, if called, causes all of the prototypes + /// that might be needed by an intrinsic lowering implementation to be + /// inserted into the module specified. + void AddPrototypes(Module &M); + + /// LowerIntrinsicCall - This method replaces a call with the LLVM function + /// which should be used to implement the specified intrinsic function call. + /// If an intrinsic function must be implemented by the code generator + /// (such as va_start), this function should print a message and abort. + /// + /// Otherwise, if an intrinsic function call can be lowered, the code to + /// implement it (often a call to a non-intrinsic function) is inserted + /// _after_ the call instruction and the call is deleted. The caller must + /// be capable of handling this kind of change. + /// + void LowerIntrinsicCall(CallInst *CI); + }; +} + +#endif diff --git a/include/llvm/CodeGen/JITCodeEmitter.h b/include/llvm/CodeGen/JITCodeEmitter.h new file mode 100644 index 0000000..bf6b76e --- /dev/null +++ b/include/llvm/CodeGen/JITCodeEmitter.h @@ -0,0 +1,322 @@ +//===-- llvm/CodeGen/JITCodeEmitter.h - Code emission ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an abstract interface that is used by the machine code +// emission framework to output the code. This allows machine code emission to +// be separated from concerns such as resolution of call targets, and where the +// machine code will be written (memory or disk, f.e.). 
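The IntrinsicLowering interface declared above is driven in roughly this way; a sketch under the assumption that CI calls an intrinsic the code generator does not insist on handling itself (for calls such as llvm.va_start, LowerIntrinsicCall aborts as documented):

#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// M, TD and CI are placeholders supplied by the caller.
void lowerOneIntrinsicCall(Module &M, const TargetData &TD, CallInst *CI) {
  IntrinsicLowering IL(TD);
  IL.AddPrototypes(M);        // declare any helper functions lowering may need
  IL.LowerIntrinsicCall(CI);  // rewrites the call and deletes CI
}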
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_JITCODEEMITTER_H +#define LLVM_CODEGEN_JITCODEEMITTER_H + +#include <string> +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/Streams.h" +#include "llvm/CodeGen/MachineCodeEmitter.h" + +using namespace std; + +namespace llvm { + +class MachineBasicBlock; +class MachineConstantPool; +class MachineJumpTableInfo; +class MachineFunction; +class MachineModuleInfo; +class MachineRelocation; +class Value; +class GlobalValue; +class Function; + +/// JITCodeEmitter - This class defines two sorts of methods: those for +/// emitting the actual bytes of machine code, and those for emitting auxillary +/// structures, such as jump tables, relocations, etc. +/// +/// Emission of machine code is complicated by the fact that we don't (in +/// general) know the size of the machine code that we're about to emit before +/// we emit it. As such, we preallocate a certain amount of memory, and set the +/// BufferBegin/BufferEnd pointers to the start and end of the buffer. As we +/// emit machine instructions, we advance the CurBufferPtr to indicate the +/// location of the next byte to emit. In the case of a buffer overflow (we +/// need to emit more machine code than we have allocated space for), the +/// CurBufferPtr will saturate to BufferEnd and ignore stores. Once the entire +/// function has been emitted, the overflow condition is checked, and if it has +/// occurred, more memory is allocated, and we reemit the code into it. +/// +class JITCodeEmitter : public MachineCodeEmitter { +public: + virtual ~JITCodeEmitter() {} + + /// startFunction - This callback is invoked when the specified function is + /// about to be code generated. This initializes the BufferBegin/End/Ptr + /// fields. + /// + virtual void startFunction(MachineFunction &F) = 0; + + /// finishFunction - This callback is invoked when the specified function has + /// finished code generation. If a buffer overflow has occurred, this method + /// returns true (the callee is required to try again), otherwise it returns + /// false. + /// + virtual bool finishFunction(MachineFunction &F) = 0; + + /// startGVStub - This callback is invoked when the JIT needs the + /// address of a GV (e.g. function) that has not been code generated yet. + /// The StubSize specifies the total size required by the stub. + /// + virtual void startGVStub(const GlobalValue* GV, unsigned StubSize, + unsigned Alignment = 1) = 0; + + /// startGVStub - This callback is invoked when the JIT needs the address of a + /// GV (e.g. function) that has not been code generated yet. Buffer points to + /// memory already allocated for this stub. + /// + virtual void startGVStub(const GlobalValue* GV, void *Buffer, + unsigned StubSize) = 0; + + /// finishGVStub - This callback is invoked to terminate a GV stub. + /// + virtual void *finishGVStub(const GlobalValue* F) = 0; + + /// emitByte - This callback is invoked when a byte needs to be written to the + /// output stream. + /// + void emitByte(uint8_t B) { + if (CurBufferPtr != BufferEnd) + *CurBufferPtr++ = B; + } + + /// emitWordLE - This callback is invoked when a 32-bit word needs to be + /// written to the output stream in little-endian format. 
+ /// + void emitWordLE(unsigned W) { + if (4 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 0); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 24); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitWordBE - This callback is invoked when a 32-bit word needs to be + /// written to the output stream in big-endian format. + /// + void emitWordBE(unsigned W) { + if (4 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 24); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 0); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitDWordLE - This callback is invoked when a 64-bit word needs to be + /// written to the output stream in little-endian format. + /// + void emitDWordLE(uint64_t W) { + if (8 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 0); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 24); + *CurBufferPtr++ = (uint8_t)(W >> 32); + *CurBufferPtr++ = (uint8_t)(W >> 40); + *CurBufferPtr++ = (uint8_t)(W >> 48); + *CurBufferPtr++ = (uint8_t)(W >> 56); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitDWordBE - This callback is invoked when a 64-bit word needs to be + /// written to the output stream in big-endian format. + /// + void emitDWordBE(uint64_t W) { + if (8 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 56); + *CurBufferPtr++ = (uint8_t)(W >> 48); + *CurBufferPtr++ = (uint8_t)(W >> 40); + *CurBufferPtr++ = (uint8_t)(W >> 32); + *CurBufferPtr++ = (uint8_t)(W >> 24); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 0); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitAlignment - Move the CurBufferPtr pointer up the the specified + /// alignment (saturated to BufferEnd of course). + void emitAlignment(unsigned Alignment) { + if (Alignment == 0) Alignment = 1; + + if(Alignment <= (uintptr_t)(BufferEnd-CurBufferPtr)) { + // Move the current buffer ptr up to the specified alignment. + CurBufferPtr = + (uint8_t*)(((uintptr_t)CurBufferPtr+Alignment-1) & + ~(uintptr_t)(Alignment-1)); + } else { + CurBufferPtr = BufferEnd; + } + } + + + /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be + /// written to the output stream. + void emitULEB128Bytes(unsigned Value) { + do { + uint8_t Byte = Value & 0x7f; + Value >>= 7; + if (Value) Byte |= 0x80; + emitByte(Byte); + } while (Value); + } + + /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be + /// written to the output stream. + void emitSLEB128Bytes(int32_t Value) { + int32_t Sign = Value >> (8 * sizeof(Value) - 1); + bool IsMore; + + do { + uint8_t Byte = Value & 0x7f; + Value >>= 7; + IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0; + if (IsMore) Byte |= 0x80; + emitByte(Byte); + } while (IsMore); + } + + /// emitString - This callback is invoked when a String needs to be + /// written to the output stream. + void emitString(const std::string &String) { + for (unsigned i = 0, N = static_cast<unsigned>(String.size()); + i < N; ++i) { + uint8_t C = String[i]; + emitByte(C); + } + emitByte(0); + } + + /// emitInt32 - Emit a int32 directive. 
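As a worked example of the ULEB128 loop above: each byte carries seven payload bits, lowest group first, with the high bit flagging continuation, so 2 encodes as 0x02 and 300 encodes as 0xAC 0x02. The signed variant only differs in its termination test on the sign bits. A standalone sketch of the same unsigned loop:

#include <cassert>
#include <vector>

// Same loop as emitULEB128Bytes, writing into a vector so it can be checked.
static std::vector<unsigned char> uleb128(unsigned Value) {
  std::vector<unsigned char> Out;
  do {
    unsigned char Byte = Value & 0x7f;   // low seven bits
    Value >>= 7;
    if (Value) Byte |= 0x80;             // more groups follow
    Out.push_back(Byte);
  } while (Value);
  return Out;
}

int main() {
  std::vector<unsigned char> A = uleb128(2);
  assert(A.size() == 1 && A[0] == 0x02);
  std::vector<unsigned char> B = uleb128(300);   // 300 = 44 + 2*128
  assert(B.size() == 2 && B[0] == 0xAC && B[1] == 0x02);
  return 0;
}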
+ void emitInt32(int32_t Value) { + if (4 <= BufferEnd-CurBufferPtr) { + *((uint32_t*)CurBufferPtr) = Value; + CurBufferPtr += 4; + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitInt64 - Emit a int64 directive. + void emitInt64(uint64_t Value) { + if (8 <= BufferEnd-CurBufferPtr) { + *((uint64_t*)CurBufferPtr) = Value; + CurBufferPtr += 8; + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitInt32At - Emit the Int32 Value in Addr. + void emitInt32At(uintptr_t *Addr, uintptr_t Value) { + if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd) + (*(uint32_t*)Addr) = (uint32_t)Value; + } + + /// emitInt64At - Emit the Int64 Value in Addr. + void emitInt64At(uintptr_t *Addr, uintptr_t Value) { + if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd) + (*(uint64_t*)Addr) = (uint64_t)Value; + } + + + /// emitLabel - Emits a label + virtual void emitLabel(uint64_t LabelID) = 0; + + /// allocateSpace - Allocate a block of space in the current output buffer, + /// returning null (and setting conditions to indicate buffer overflow) on + /// failure. Alignment is the alignment in bytes of the buffer desired. + virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) { + emitAlignment(Alignment); + void *Result; + + // Check for buffer overflow. + if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) { + CurBufferPtr = BufferEnd; + Result = 0; + } else { + // Allocate the space. + Result = CurBufferPtr; + CurBufferPtr += Size; + } + + return Result; + } + + /// StartMachineBasicBlock - This should be called by the target when a new + /// basic block is about to be emitted. This way the MCE knows where the + /// start of the block is, and can implement getMachineBasicBlockAddress. + virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) = 0; + + /// getCurrentPCValue - This returns the address that the next emitted byte + /// will be output to. + /// + virtual uintptr_t getCurrentPCValue() const { + return (uintptr_t)CurBufferPtr; + } + + /// getCurrentPCOffset - Return the offset from the start of the emitted + /// buffer that we are currently writing to. + uintptr_t getCurrentPCOffset() const { + return CurBufferPtr-BufferBegin; + } + + /// addRelocation - Whenever a relocatable address is needed, it should be + /// noted with this interface. + virtual void addRelocation(const MachineRelocation &MR) = 0; + + /// FIXME: These should all be handled with relocations! + + /// getConstantPoolEntryAddress - Return the address of the 'Index' entry in + /// the constant pool that was last emitted with the emitConstantPool method. + /// + virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const = 0; + + /// getJumpTableEntryAddress - Return the address of the jump table with index + /// 'Index' in the function that last called initJumpTableInfo. + /// + virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const = 0; + + /// getMachineBasicBlockAddress - Return the address of the specified + /// MachineBasicBlock, only usable after the label for the MBB has been + /// emitted. + /// + virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0; + + /// getLabelAddress - Return the address of the specified LabelID, only usable + /// after the LabelID has been emitted. + /// + virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0; + + /// Specifies the MachineModuleInfo object. This is used for exception handling + /// purposes. 
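The saturating-buffer scheme described in the class comment implies a retry protocol around startFunction and finishFunction. A hedged sketch of such a driver; EmitBodyFn stands in for whatever the target's code emitter does for one function and is not a real LLVM API:

#include "llvm/CodeGen/JITCodeEmitter.h"
using namespace llvm;

typedef void (*EmitBodyFn)(JITCodeEmitter &, MachineFunction &);

void emitWithRetry(JITCodeEmitter &JCE, MachineFunction &MF,
                   EmitBodyFn EmitBody) {
  do {
    JCE.startFunction(MF);          // resets BufferBegin/CurBufferPtr/BufferEnd
    EmitBody(JCE, MF);              // emit* calls saturate at BufferEnd
  } while (JCE.finishFunction(MF)); // true means overflow: grow and re-emit
}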
+ virtual void setModuleInfo(MachineModuleInfo* Info) = 0; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/LatencyPriorityQueue.h b/include/llvm/CodeGen/LatencyPriorityQueue.h new file mode 100644 index 0000000..71fae2a --- /dev/null +++ b/include/llvm/CodeGen/LatencyPriorityQueue.h @@ -0,0 +1,112 @@ +//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the LatencyPriorityQueue class, which is a +// SchedulingPriorityQueue that schedules using latency information to +// reduce the length of the critical path through the basic block. +// +//===----------------------------------------------------------------------===// + +#ifndef LATENCY_PRIORITY_QUEUE_H +#define LATENCY_PRIORITY_QUEUE_H + +#include "llvm/CodeGen/ScheduleDAG.h" +#include "llvm/ADT/PriorityQueue.h" + +namespace llvm { + class LatencyPriorityQueue; + + /// Sorting functions for the Available queue. + struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> { + LatencyPriorityQueue *PQ; + explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {} + + bool operator()(const SUnit* left, const SUnit* right) const; + }; + + class LatencyPriorityQueue : public SchedulingPriorityQueue { + // SUnits - The SUnits for the current graph. + std::vector<SUnit> *SUnits; + + /// NumNodesSolelyBlocking - This vector contains, for every node in the + /// Queue, the number of nodes that the node is the sole unscheduled + /// predecessor for. This is used as a tie-breaker heuristic for better + /// mobility. + std::vector<unsigned> NumNodesSolelyBlocking; + + PriorityQueue<SUnit*, std::vector<SUnit*>, latency_sort> Queue; +public: + LatencyPriorityQueue() : Queue(latency_sort(this)) { + } + + void initNodes(std::vector<SUnit> &sunits) { + SUnits = &sunits; + NumNodesSolelyBlocking.resize(SUnits->size(), 0); + } + + void addNode(const SUnit *SU) { + NumNodesSolelyBlocking.resize(SUnits->size(), 0); + } + + void updateNode(const SUnit *SU) { + } + + void releaseState() { + SUnits = 0; + } + + unsigned getLatency(unsigned NodeNum) const { + assert(NodeNum < (*SUnits).size()); + return (*SUnits)[NodeNum].getHeight(); + } + + unsigned getNumSolelyBlockNodes(unsigned NodeNum) const { + assert(NodeNum < NumNodesSolelyBlocking.size()); + return NumNodesSolelyBlocking[NodeNum]; + } + + unsigned size() const { return Queue.size(); } + + bool empty() const { return Queue.empty(); } + + virtual void push(SUnit *U) { + push_impl(U); + } + void push_impl(SUnit *U); + + void push_all(const std::vector<SUnit *> &Nodes) { + for (unsigned i = 0, e = Nodes.size(); i != e; ++i) + push_impl(Nodes[i]); + } + + SUnit *pop() { + if (empty()) return NULL; + SUnit *V = Queue.top(); + Queue.pop(); + return V; + } + + void remove(SUnit *SU) { + assert(!Queue.empty() && "Not in queue!"); + Queue.erase_one(SU); + } + + // ScheduledNode - As nodes are scheduled, we look to see if there are any + // successor nodes that have a single unscheduled predecessor. If so, that + // single predecessor has a higher priority, since scheduling it will make + // the node available. 
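A rough sketch of how a top-down list scheduler might drive this queue; the real driver lives in the ScheduleDAG code, and the Preds check below assumes SUnit exposes its predecessor list that way:

#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include <vector>
using namespace llvm;

void listScheduleSketch(std::vector<SUnit> &SUnits, LatencyPriorityQueue &Q) {
  Q.initNodes(SUnits);
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i)
    if (SUnits[i].Preds.empty())       // nodes with no predecessors are ready
      Q.push(&SUnits[i]);

  while (!Q.empty()) {
    SUnit *SU = Q.pop();               // longest remaining latency first
    // ... emit SU and push successors as their predecessors finish ...
    Q.ScheduledNode(SU);               // re-prioritize nodes SU was blocking
  }
  Q.releaseState();
}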
+ void ScheduledNode(SUnit *Node); + +private: + void AdjustPriorityOfUnscheduledPreds(SUnit *SU); + SUnit *getSingleUnscheduledPred(SUnit *SU); + }; +} + +#endif diff --git a/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/include/llvm/CodeGen/LinkAllAsmWriterComponents.h new file mode 100644 index 0000000..1673c89 --- /dev/null +++ b/include/llvm/CodeGen/LinkAllAsmWriterComponents.h @@ -0,0 +1,36 @@ +//===- llvm/Codegen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file pulls in all assembler writer related passes for tools like +// llc that need this functionality. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H +#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H + +#include "llvm/CodeGen/GCs.h" + +namespace { + struct ForceAsmWriterLinking { + ForceAsmWriterLinking() { + // We must reference the plug-ins in such a way that compilers will not + // delete it all as dead code, even with whole program optimization, + // yet is effectively a NO-OP. As the compiler isn't smart enough + // to know that getenv() never returns -1, this will do the job. + if (std::getenv("bar") != (char*) -1) + return; + + llvm::linkOcamlGCPrinter(); + + } + } ForceAsmWriterLinking; // Force link by creating a global definition. +} + +#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h new file mode 100644 index 0000000..a231f49 --- /dev/null +++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h @@ -0,0 +1,56 @@ +//===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file pulls in all codegen related passes for tools like lli and +// llc that need this functionality. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H +#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H + +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/SchedulerRegistry.h" +#include "llvm/CodeGen/GCs.h" +#include "llvm/Target/TargetMachine.h" + +namespace { + struct ForceCodegenLinking { + ForceCodegenLinking() { + // We must reference the passes in such a way that compilers will not + // delete it all as dead code, even with whole program optimization, + // yet is effectively a NO-OP. As the compiler isn't smart enough + // to know that getenv() never returns -1, this will do the job. 
+ if (std::getenv("bar") != (char*) -1) + return; + + (void) llvm::createDeadMachineInstructionElimPass(); + + (void) llvm::createSimpleRegisterAllocator(); + (void) llvm::createLocalRegisterAllocator(); + (void) llvm::createBigBlockRegisterAllocator(); + (void) llvm::createLinearScanRegisterAllocator(); + (void) llvm::createPBQPRegisterAllocator(); + + (void) llvm::createSimpleRegisterCoalescer(); + + llvm::linkOcamlGC(); + llvm::linkShadowStackGC(); + + (void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default); + (void) llvm::createTDRRListDAGScheduler(NULL, llvm::CodeGenOpt::Default); + (void) llvm::createTDListDAGScheduler(NULL, llvm::CodeGenOpt::Default); + (void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default); + (void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default); + + } + } ForceCodegenLinking; // Force link by creating a global definition. +} + +#endif diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h new file mode 100644 index 0000000..f1ae587 --- /dev/null +++ b/include/llvm/CodeGen/LiveInterval.h @@ -0,0 +1,468 @@ +//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveRange and LiveInterval classes. Given some +// numbering of each the machine instructions an interval [i, j) is said to be a +// live interval for register v if there is no instruction with number j' >= j +// such that v is live at j' and there is no instruction with number i' < i such +// that v is live at i'. In this implementation intervals can have holes, +// i.e. an interval might look like [1,20), [50,65), [1000,1001). Each +// individual range is represented as an instance of LiveRange, and the whole +// interval is represented as an instance of LiveInterval. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVEINTERVAL_H +#define LLVM_CODEGEN_LIVEINTERVAL_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Allocator.h" +#include <iosfwd> +#include <cassert> +#include <climits> + +namespace llvm { + class MachineInstr; + class TargetRegisterInfo; + struct LiveInterval; + + /// VNInfo - If the value number definition is undefined (e.g. phi + /// merge point), it contains ~0u,x. If the value number is not in use, it + /// contains ~1u,x to indicate that the value # is not used. + /// def - Instruction # of the definition. + /// - or reg # of the definition if it's a stack slot liveinterval. + /// copy - Copy iff val# is defined by a copy; zero otherwise. + /// hasPHIKill - One or more of the kills are PHI nodes. + /// redefByEC - Re-defined by early clobber somewhere during the live range. + /// kills - Instruction # of the kills. + struct VNInfo { + unsigned id; + unsigned def; + MachineInstr *copy; + bool hasPHIKill : 1; + bool redefByEC : 1; + SmallVector<unsigned, 4> kills; + VNInfo() + : id(~1U), def(~1U), copy(0), hasPHIKill(false), redefByEC(false) {} + VNInfo(unsigned i, unsigned d, MachineInstr *c) + : id(i), def(d), copy(c), hasPHIKill(false), redefByEC(false) {} + }; + + /// LiveRange structure - This represents a simple register range in the + /// program, with an inclusive start point and an exclusive end point. 
+ /// These ranges are rendered as [start,end). + struct LiveRange { + unsigned start; // Start point of the interval (inclusive) + unsigned end; // End point of the interval (exclusive) + VNInfo *valno; // identifier for the value contained in this interval. + + LiveRange(unsigned S, unsigned E, VNInfo *V) : start(S), end(E), valno(V) { + assert(S < E && "Cannot create empty or backwards range"); + } + + /// contains - Return true if the index is covered by this range. + /// + bool contains(unsigned I) const { + return start <= I && I < end; + } + + bool operator<(const LiveRange &LR) const { + return start < LR.start || (start == LR.start && end < LR.end); + } + bool operator==(const LiveRange &LR) const { + return start == LR.start && end == LR.end; + } + + void dump() const; + void print(std::ostream &os) const; + void print(std::ostream *os) const { if (os) print(*os); } + + private: + LiveRange(); // DO NOT IMPLEMENT + }; + + std::ostream& operator<<(std::ostream& os, const LiveRange &LR); + + + inline bool operator<(unsigned V, const LiveRange &LR) { + return V < LR.start; + } + + inline bool operator<(const LiveRange &LR, unsigned V) { + return LR.start < V; + } + + /// LiveInterval - This class represents some number of live ranges for a + /// register or value. This class also contains a bit of register allocator + /// state. + struct LiveInterval { + typedef SmallVector<LiveRange,4> Ranges; + typedef SmallVector<VNInfo*,4> VNInfoList; + + unsigned reg; // the register or stack slot of this interval + // if the top bits is set, it represents a stack slot. + float weight; // weight of this interval + unsigned short preference; // preferred register for this interval + Ranges ranges; // the ranges in which this register is live + VNInfoList valnos; // value#'s + + public: + + struct InstrSlots { + enum { + LOAD = 0, + USE = 1, + DEF = 2, + STORE = 3, + NUM = 4 + }; + + static unsigned scale(unsigned slot, unsigned factor) { + unsigned index = slot / NUM, + offset = slot % NUM; + assert(index <= ~0U / (factor * NUM) && + "Rescaled interval would overflow"); + return index * NUM * factor + offset; + } + + }; + + LiveInterval(unsigned Reg, float Weight, bool IsSS = false) + : reg(Reg), weight(Weight), preference(0) { + if (IsSS) + reg = reg | (1U << (sizeof(unsigned)*CHAR_BIT-1)); + } + + typedef Ranges::iterator iterator; + iterator begin() { return ranges.begin(); } + iterator end() { return ranges.end(); } + + typedef Ranges::const_iterator const_iterator; + const_iterator begin() const { return ranges.begin(); } + const_iterator end() const { return ranges.end(); } + + typedef VNInfoList::iterator vni_iterator; + vni_iterator vni_begin() { return valnos.begin(); } + vni_iterator vni_end() { return valnos.end(); } + + typedef VNInfoList::const_iterator const_vni_iterator; + const_vni_iterator vni_begin() const { return valnos.begin(); } + const_vni_iterator vni_end() const { return valnos.end(); } + + /// advanceTo - Advance the specified iterator to point to the LiveRange + /// containing the specified position, or end() if the position is past the + /// end of the interval. If no LiveRange contains this position, but the + /// position is in a hole, this method returns an iterator pointing the the + /// LiveRange immediately after the hole. 
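A small illustration of the half-open [start,end) convention, using only the constructors declared above; the indices are made up:

#include "llvm/CodeGen/LiveInterval.h"
#include <cassert>
using namespace llvm;

void liveRangeSemanticsExample() {
  VNInfo VN(0, 4, 0);          // value #0, defined at instruction index 4
  LiveRange LR(4, 20, &VN);    // live over [4,20)
  assert(LR.contains(4));      // inclusive start
  assert(LR.contains(19));
  assert(!LR.contains(20));    // exclusive end
}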
+ iterator advanceTo(iterator I, unsigned Pos) { + if (Pos >= endNumber()) + return end(); + while (I->end <= Pos) ++I; + return I; + } + + void clear() { + while (!valnos.empty()) { + VNInfo *VNI = valnos.back(); + valnos.pop_back(); + VNI->~VNInfo(); + } + + ranges.clear(); + } + + /// isStackSlot - Return true if this is a stack slot interval. + /// + bool isStackSlot() const { + return reg & (1U << (sizeof(unsigned)*CHAR_BIT-1)); + } + + /// getStackSlotIndex - Return stack slot index if this is a stack slot + /// interval. + int getStackSlotIndex() const { + assert(isStackSlot() && "Interval is not a stack slot interval!"); + return reg & ~(1U << (sizeof(unsigned)*CHAR_BIT-1)); + } + + bool hasAtLeastOneValue() const { return !valnos.empty(); } + + bool containsOneValue() const { return valnos.size() == 1; } + + unsigned getNumValNums() const { return (unsigned)valnos.size(); } + + /// getValNumInfo - Returns pointer to the specified val#. + /// + inline VNInfo *getValNumInfo(unsigned ValNo) { + return valnos[ValNo]; + } + inline const VNInfo *getValNumInfo(unsigned ValNo) const { + return valnos[ValNo]; + } + + /// copyValNumInfo - Copy the value number info for one value number to + /// another. + void copyValNumInfo(VNInfo *DstValNo, const VNInfo *SrcValNo) { + DstValNo->def = SrcValNo->def; + DstValNo->copy = SrcValNo->copy; + DstValNo->hasPHIKill = SrcValNo->hasPHIKill; + DstValNo->redefByEC = SrcValNo->redefByEC; + DstValNo->kills = SrcValNo->kills; + } + + /// getNextValue - Create a new value number and return it. MIIdx specifies + /// the instruction that defines the value number. + VNInfo *getNextValue(unsigned MIIdx, MachineInstr *CopyMI, + BumpPtrAllocator &VNInfoAllocator) { +#ifdef __GNUC__ + unsigned Alignment = (unsigned)__alignof__(VNInfo); +#else + // FIXME: ugly. + unsigned Alignment = 8; +#endif + VNInfo *VNI = + static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo), + Alignment)); + new (VNI) VNInfo((unsigned)valnos.size(), MIIdx, CopyMI); + valnos.push_back(VNI); + return VNI; + } + + /// addKill - Add a kill instruction index to the specified value + /// number. + static void addKill(VNInfo *VNI, unsigned KillIdx) { + SmallVector<unsigned, 4> &kills = VNI->kills; + if (kills.empty()) { + kills.push_back(KillIdx); + } else { + SmallVector<unsigned, 4>::iterator + I = std::lower_bound(kills.begin(), kills.end(), KillIdx); + kills.insert(I, KillIdx); + } + } + + /// addKills - Add a number of kills into the VNInfo kill vector. If this + /// interval is live at a kill point, then the kill is not added. + void addKills(VNInfo *VNI, const SmallVector<unsigned, 4> &kills) { + for (unsigned i = 0, e = static_cast<unsigned>(kills.size()); + i != e; ++i) { + unsigned KillIdx = kills[i]; + if (!liveBeforeAndAt(KillIdx)) { + SmallVector<unsigned, 4>::iterator + I = std::lower_bound(VNI->kills.begin(), VNI->kills.end(), KillIdx); + VNI->kills.insert(I, KillIdx); + } + } + } + + /// removeKill - Remove the specified kill from the list of kills of + /// the specified val#. + static bool removeKill(VNInfo *VNI, unsigned KillIdx) { + SmallVector<unsigned, 4> &kills = VNI->kills; + SmallVector<unsigned, 4>::iterator + I = std::lower_bound(kills.begin(), kills.end(), KillIdx); + if (I != kills.end() && *I == KillIdx) { + kills.erase(I); + return true; + } + return false; + } + + /// removeKills - Remove all the kills in specified range + /// [Start, End] of the specified val#. 
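The stack-slot encoding mentioned above (the top bit of 'reg' is set when IsSS is true) can be seen with a couple of throwaway intervals:

#include "llvm/CodeGen/LiveInterval.h"
#include <cassert>
using namespace llvm;

void stackSlotEncodingExample() {
  LiveInterval RegLI(42, 0.0f);        // interval for register 42
  assert(!RegLI.isStackSlot());

  LiveInterval SlotLI(3, 0.0f, true);  // interval for stack slot 3
  assert(SlotLI.isStackSlot());        // the top bit of 'reg' is set
  assert(SlotLI.getStackSlotIndex() == 3);
}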
+    static void removeKills(VNInfo *VNI, unsigned Start, unsigned End) {
+      SmallVector<unsigned, 4> &kills = VNI->kills;
+      SmallVector<unsigned, 4>::iterator
+        I = std::lower_bound(kills.begin(), kills.end(), Start);
+      SmallVector<unsigned, 4>::iterator
+        E = std::upper_bound(kills.begin(), kills.end(), End);
+      kills.erase(I, E);
+    }
+
+    /// isKill - Return true if the specified index is a kill of the
+    /// specified val#.
+    static bool isKill(const VNInfo *VNI, unsigned KillIdx) {
+      const SmallVector<unsigned, 4> &kills = VNI->kills;
+      SmallVector<unsigned, 4>::const_iterator
+        I = std::lower_bound(kills.begin(), kills.end(), KillIdx);
+      return I != kills.end() && *I == KillIdx;
+    }
+
+    /// isOnlyLROfValNo - Return true if the specified live range is the only
+    /// one defined by its val#.
+    bool isOnlyLROfValNo(const LiveRange *LR) {
+      for (const_iterator I = begin(), E = end(); I != E; ++I) {
+        const LiveRange *Tmp = I;
+        if (Tmp != LR && Tmp->valno == LR->valno)
+          return false;
+      }
+      return true;
+    }
+
+    /// MergeValueNumberInto - This method is called when two value numbers
+    /// are found to be equivalent. This eliminates V1, replacing all
+    /// LiveRanges with the V1 value number with the V2 value number. This can
+    /// cause merging of V1/V2 value numbers and compaction of the value space.
+    VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
+
+    /// MergeInClobberRanges - For any live ranges that are not defined in the
+    /// current interval, but are defined in the Clobbers interval, mark them
+    /// used with an unknown definition value. Caller must pass in a reference
+    /// to VNInfoAllocator since it will create a new val#.
+    void MergeInClobberRanges(const LiveInterval &Clobbers,
+                              BumpPtrAllocator &VNInfoAllocator);
+
+    /// MergeInClobberRange - Same as MergeInClobberRanges except it merges in
+    /// a single LiveRange only.
+    void MergeInClobberRange(unsigned Start, unsigned End,
+                             BumpPtrAllocator &VNInfoAllocator);
+
+    /// MergeRangesInAsValue - Merge all of the live ranges of a specific val#
+    /// in RHS into this live interval as the specified value number.
+    /// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
+    /// current interval; the value numbers of the overlapped live ranges are
+    /// replaced with the specified value number.
+    void MergeRangesInAsValue(const LiveInterval &RHS, VNInfo *LHSValNo);
+
+    /// MergeValueInAsValue - Merge all of the live ranges of a specific val#
+    /// in RHS into this live interval as the specified value number.
+    /// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
+    /// current interval, but only if the overlapping LiveRanges have the
+    /// specified value number.
+    void MergeValueInAsValue(const LiveInterval &RHS,
+                             const VNInfo *RHSValNo, VNInfo *LHSValNo);
+
+    /// Copy - Copy the specified live interval. This copies all the fields
+    /// except for the register of the interval.
+    void Copy(const LiveInterval &RHS, BumpPtrAllocator &VNInfoAllocator);
+
+    bool empty() const { return ranges.empty(); }
+
+    /// beginNumber - Return the lowest numbered slot covered by the interval.
+    unsigned beginNumber() const {
+      if (empty())
+        return 0;
+      return ranges.front().start;
+    }
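The per-value kill lists are kept sorted, so the static helpers above behave like this (indices are hypothetical):

#include "llvm/CodeGen/LiveInterval.h"
#include <cassert>
using namespace llvm;

void killListExample() {
  VNInfo VN(0, 4, 0);                    // value #0, defined at index 4
  LiveInterval::addKill(&VN, 18);
  LiveInterval::addKill(&VN, 10);        // kept sorted via lower_bound
  assert(VN.kills[0] == 10 && VN.kills[1] == 18);
  assert(LiveInterval::isKill(&VN, 18));

  LiveInterval::removeKill(&VN, 10);
  assert(!LiveInterval::isKill(&VN, 10));
}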
+
+    /// endNumber - Return the maximum point of the interval, exclusive.
+    unsigned endNumber() const {
+      if (empty())
+        return 0;
+      return ranges.back().end;
+    }
+
+    bool expiredAt(unsigned index) const {
+      return index >= endNumber();
+    }
+
+    bool liveAt(unsigned index) const;
+
+    // liveBeforeAndAt - Check if the interval is live at the index and the
+    // index just before it. If index is liveAt, check if it starts a new live
+    // range. If it does, then check if the previous live range ends at index-1.
+    bool liveBeforeAndAt(unsigned index) const;
+
+    /// getLiveRangeContaining - Return the live range that contains the
+    /// specified index, or null if there is none.
+    const LiveRange *getLiveRangeContaining(unsigned Idx) const {
+      const_iterator I = FindLiveRangeContaining(Idx);
+      return I == end() ? 0 : &*I;
+    }
+
+    /// FindLiveRangeContaining - Return an iterator to the live range that
+    /// contains the specified index, or end() if there is none.
+    const_iterator FindLiveRangeContaining(unsigned Idx) const;
+
+    /// FindLiveRangeContaining - Return an iterator to the live range that
+    /// contains the specified index, or end() if there is none.
+    iterator FindLiveRangeContaining(unsigned Idx);
+
+    /// findDefinedVNInfo - Find the VNInfo that's defined at the specified
+    /// index (register interval) or defined by the specified register (stack
+    /// interval).
+    VNInfo *findDefinedVNInfo(unsigned DefIdxOrReg) const;
+
+    /// overlaps - Return true if the intersection of the two live intervals is
+    /// not empty.
+    bool overlaps(const LiveInterval& other) const {
+      return overlapsFrom(other, other.begin());
+    }
+
+    /// overlaps - Return true if the live interval overlaps a range specified
+    /// by [Start, End).
+    bool overlaps(unsigned Start, unsigned End) const;
+
+    /// overlapsFrom - Return true if the intersection of the two live
+    /// intervals is not empty. The specified iterator is a hint that we can
+    /// begin scanning the Other interval starting at I.
+    bool overlapsFrom(const LiveInterval& other, const_iterator I) const;
+
+    /// addRange - Add the specified LiveRange to this interval, merging ranges
+    /// as appropriate. The inserted range may be extended or coalesced with
+    /// neighboring ranges.
+    void addRange(LiveRange LR) {
+      addRangeFrom(LR, ranges.begin());
+    }
+
+    /// join - Join two live intervals (this, and other) together. This applies
+    /// mappings to the value numbers in the LHS/RHS intervals as specified. If
+    /// the intervals are not joinable, this aborts.
+    void join(LiveInterval &Other, const int *ValNoAssignments,
+              const int *RHSValNoAssignments,
+              SmallVector<VNInfo*, 16> &NewVNInfo);
+
+    /// isInOneLiveRange - Return true if the range specified is entirely in a
+    /// single LiveRange of the live interval.
+    bool isInOneLiveRange(unsigned Start, unsigned End);
+
+    /// removeRange - Remove the specified range from this interval. Note that
+    /// the range must be a single LiveRange in its entirety.
+    void removeRange(unsigned Start, unsigned End, bool RemoveDeadValNo = false);
+
+    void removeRange(LiveRange LR, bool RemoveDeadValNo = false) {
+      removeRange(LR.start, LR.end, RemoveDeadValNo);
+    }
+
+    /// removeValNo - Remove all the ranges defined by the specified value#.
+    /// Also remove the value# from the value# list.
+    void removeValNo(VNInfo *ValNo);
+
+    /// scaleNumbering - Renumber VNI and ranges to provide gaps for new
+    /// instructions.
+    void scaleNumbering(unsigned factor);
+
+    /// getSize - Returns the sum of sizes of all the LiveRange's.
+ /// + unsigned getSize() const; + + bool operator<(const LiveInterval& other) const { + return beginNumber() < other.beginNumber(); + } + + void print(std::ostream &OS, const TargetRegisterInfo *TRI = 0) const; + void print(std::ostream *OS, const TargetRegisterInfo *TRI = 0) const { + if (OS) print(*OS, TRI); + } + void dump() const; + + private: + Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From); + void extendIntervalEndTo(Ranges::iterator I, unsigned NewEnd); + Ranges::iterator extendIntervalStartTo(Ranges::iterator I, unsigned NewStr); + LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT + }; + + inline std::ostream &operator<<(std::ostream &OS, const LiveInterval &LI) { + LI.print(OS); + return OS; + } +} + +#endif diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h new file mode 100644 index 0000000..7c44cc7 --- /dev/null +++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h @@ -0,0 +1,537 @@ +//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveInterval analysis pass. Given some numbering of +// each the machine instructions (in this implemention depth-first order) an +// interval [i, j) is said to be a live interval for register v if there is no +// instruction with number j' > j such that v is live at j' and there is no +// instruction with number i' < i such that v is live at i'. In this +// implementation intervals can have holes, i.e. an interval might look like +// [1,20), [50,65), [1000,1001). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H +#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/LiveInterval.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Allocator.h" +#include <cmath> + +namespace llvm { + + class AliasAnalysis; + class LiveVariables; + class MachineLoopInfo; + class TargetRegisterInfo; + class MachineRegisterInfo; + class TargetInstrInfo; + class TargetRegisterClass; + class VirtRegMap; + typedef std::pair<unsigned, MachineBasicBlock*> IdxMBBPair; + + inline bool operator<(unsigned V, const IdxMBBPair &IM) { + return V < IM.first; + } + + inline bool operator<(const IdxMBBPair &IM, unsigned V) { + return IM.first < V; + } + + struct Idx2MBBCompare { + bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const { + return LHS.first < RHS.first; + } + }; + + class LiveIntervals : public MachineFunctionPass { + MachineFunction* mf_; + MachineRegisterInfo* mri_; + const TargetMachine* tm_; + const TargetRegisterInfo* tri_; + const TargetInstrInfo* tii_; + AliasAnalysis *aa_; + LiveVariables* lv_; + + /// Special pool allocator for VNInfo's (LiveInterval val#). + /// + BumpPtrAllocator VNInfoAllocator; + + /// MBB2IdxMap - The indexes of the first and last instructions in the + /// specified basic block. + std::vector<std::pair<unsigned, unsigned> > MBB2IdxMap; + + /// Idx2MBBMap - Sorted list of pairs of index of first instruction + /// and MBB id. 
+ std::vector<IdxMBBPair> Idx2MBBMap; + + /// FunctionSize - The number of instructions present in the function + uint64_t FunctionSize; + + typedef DenseMap<MachineInstr*, unsigned> Mi2IndexMap; + Mi2IndexMap mi2iMap_; + + typedef std::vector<MachineInstr*> Index2MiMap; + Index2MiMap i2miMap_; + + typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap; + Reg2IntervalMap r2iMap_; + + BitVector allocatableRegs_; + + std::vector<MachineInstr*> ClonedMIs; + + typedef LiveInterval::InstrSlots InstrSlots; + + public: + static char ID; // Pass identification, replacement for typeid + LiveIntervals() : MachineFunctionPass(&ID) {} + + static unsigned getBaseIndex(unsigned index) { + return index - (index % InstrSlots::NUM); + } + static unsigned getBoundaryIndex(unsigned index) { + return getBaseIndex(index + InstrSlots::NUM - 1); + } + static unsigned getLoadIndex(unsigned index) { + return getBaseIndex(index) + InstrSlots::LOAD; + } + static unsigned getUseIndex(unsigned index) { + return getBaseIndex(index) + InstrSlots::USE; + } + static unsigned getDefIndex(unsigned index) { + return getBaseIndex(index) + InstrSlots::DEF; + } + static unsigned getStoreIndex(unsigned index) { + return getBaseIndex(index) + InstrSlots::STORE; + } + + static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) { + return (isDef + isUse) * powf(10.0F, (float)loopDepth); + } + + typedef Reg2IntervalMap::iterator iterator; + typedef Reg2IntervalMap::const_iterator const_iterator; + const_iterator begin() const { return r2iMap_.begin(); } + const_iterator end() const { return r2iMap_.end(); } + iterator begin() { return r2iMap_.begin(); } + iterator end() { return r2iMap_.end(); } + unsigned getNumIntervals() const { return (unsigned)r2iMap_.size(); } + + LiveInterval &getInterval(unsigned reg) { + Reg2IntervalMap::iterator I = r2iMap_.find(reg); + assert(I != r2iMap_.end() && "Interval does not exist for register"); + return *I->second; + } + + const LiveInterval &getInterval(unsigned reg) const { + Reg2IntervalMap::const_iterator I = r2iMap_.find(reg); + assert(I != r2iMap_.end() && "Interval does not exist for register"); + return *I->second; + } + + bool hasInterval(unsigned reg) const { + return r2iMap_.count(reg); + } + + /// getMBBStartIdx - Return the base index of the first instruction in the + /// specified MachineBasicBlock. + unsigned getMBBStartIdx(MachineBasicBlock *MBB) const { + return getMBBStartIdx(MBB->getNumber()); + } + unsigned getMBBStartIdx(unsigned MBBNo) const { + assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!"); + return MBB2IdxMap[MBBNo].first; + } + + /// getMBBEndIdx - Return the store index of the last instruction in the + /// specified MachineBasicBlock. + unsigned getMBBEndIdx(MachineBasicBlock *MBB) const { + return getMBBEndIdx(MBB->getNumber()); + } + unsigned getMBBEndIdx(unsigned MBBNo) const { + assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!"); + return MBB2IdxMap[MBBNo].second; + } + + /// getScaledIntervalSize - get the size of an interval in "units," + /// where every function is composed of one thousand units. This + /// measure scales properly with empty index slots in the function. + double getScaledIntervalSize(LiveInterval& I) { + return (1000.0 / InstrSlots::NUM * I.getSize()) / i2miMap_.size(); + } + + /// getApproximateInstructionCount - computes an estimate of the number + /// of instructions in a given LiveInterval. 
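The slot arithmetic and spill-weight formula above work out as follows for an instruction whose base index is 8; a standalone check, not part of the header:

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include <cassert>
using namespace llvm;

void slotNumberingExample() {
  // An instruction at base index 8 owns slots 8..11:
  assert(LiveIntervals::getLoadIndex(8)  == 8);    // LOAD
  assert(LiveIntervals::getUseIndex(8)   == 9);    // USE
  assert(LiveIntervals::getDefIndex(8)   == 10);   // DEF
  assert(LiveIntervals::getStoreIndex(8) == 11);   // STORE
  assert(LiveIntervals::getBaseIndex(10) == 8);    // any slot maps back to 8

  // Spill weight is (isDef + isUse) * 10^loopDepth: a use at loop depth 2 is
  // worth about 100, a def plus use outside any loop about 2.
  float W = LiveIntervals::getSpillWeight(false, true, 2);
  (void)W;
}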
+ unsigned getApproximateInstructionCount(LiveInterval& I) { + double IntervalPercentage = getScaledIntervalSize(I) / 1000.0; + return (unsigned)(IntervalPercentage * FunctionSize); + } + + /// getMBBFromIndex - given an index in any instruction of an + /// MBB return a pointer the MBB + MachineBasicBlock* getMBBFromIndex(unsigned index) const { + std::vector<IdxMBBPair>::const_iterator I = + std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), index); + // Take the pair containing the index + std::vector<IdxMBBPair>::const_iterator J = + ((I != Idx2MBBMap.end() && I->first > index) || + (I == Idx2MBBMap.end() && Idx2MBBMap.size()>0)) ? (I-1): I; + + assert(J != Idx2MBBMap.end() && J->first < index+1 && + index <= getMBBEndIdx(J->second) && + "index does not correspond to an MBB"); + return J->second; + } + + /// getInstructionIndex - returns the base index of instr + unsigned getInstructionIndex(MachineInstr* instr) const { + Mi2IndexMap::const_iterator it = mi2iMap_.find(instr); + assert(it != mi2iMap_.end() && "Invalid instruction!"); + return it->second; + } + + /// getInstructionFromIndex - given an index in any slot of an + /// instruction return a pointer the instruction + MachineInstr* getInstructionFromIndex(unsigned index) const { + index /= InstrSlots::NUM; // convert index to vector index + assert(index < i2miMap_.size() && + "index does not correspond to an instruction"); + return i2miMap_[index]; + } + + /// hasGapBeforeInstr - Return true if the previous instruction slot, + /// i.e. Index - InstrSlots::NUM, is not occupied. + bool hasGapBeforeInstr(unsigned Index) { + Index = getBaseIndex(Index - InstrSlots::NUM); + return getInstructionFromIndex(Index) == 0; + } + + /// hasGapAfterInstr - Return true if the successive instruction slot, + /// i.e. Index + InstrSlots::Num, is not occupied. + bool hasGapAfterInstr(unsigned Index) { + Index = getBaseIndex(Index + InstrSlots::NUM); + return getInstructionFromIndex(Index) == 0; + } + + /// findGapBeforeInstr - Find an empty instruction slot before the + /// specified index. If "Furthest" is true, find one that's furthest + /// away from the index (but before any index that's occupied). + unsigned findGapBeforeInstr(unsigned Index, bool Furthest = false) { + Index = getBaseIndex(Index - InstrSlots::NUM); + if (getInstructionFromIndex(Index)) + return 0; // No gap! + if (!Furthest) + return Index; + unsigned PrevIndex = getBaseIndex(Index - InstrSlots::NUM); + while (getInstructionFromIndex(Index)) { + Index = PrevIndex; + PrevIndex = getBaseIndex(Index - InstrSlots::NUM); + } + return Index; + } + + /// InsertMachineInstrInMaps - Insert the specified machine instruction + /// into the instruction index map at the given index. + void InsertMachineInstrInMaps(MachineInstr *MI, unsigned Index) { + i2miMap_[Index / InstrSlots::NUM] = MI; + Mi2IndexMap::iterator it = mi2iMap_.find(MI); + assert(it == mi2iMap_.end() && "Already in map!"); + mi2iMap_[MI] = Index; + } + + /// conflictsWithPhysRegDef - Returns true if the specified register + /// is defined during the duration of the specified interval. + bool conflictsWithPhysRegDef(const LiveInterval &li, VirtRegMap &vrm, + unsigned reg); + + /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except + /// it can check use as well. 
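A hedged sketch of a typical query sequence from a pass that has required this analysis; MI and Reg are placeholders supplied by the caller, and the pass is assumed to have declared AU.addRequired<LiveIntervals>() in getAnalysisUsage():

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

bool regLiveAtUseOf(LiveIntervals &LIS, MachineInstr *MI, unsigned Reg) {
  if (!LIS.hasInterval(Reg))
    return false;
  unsigned Idx = LIS.getInstructionIndex(MI);          // base index of MI
  const LiveInterval &LI = LIS.getInterval(Reg);
  return LI.liveAt(LiveIntervals::getUseIndex(Idx));   // liveness at the use slot
}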
+ bool conflictsWithPhysRegRef(LiveInterval &li, unsigned Reg, + bool CheckUse, + SmallPtrSet<MachineInstr*,32> &JoinedCopies); + + /// findLiveInMBBs - Given a live range, if the value of the range + /// is live in any MBB returns true as well as the list of basic blocks + /// in which the value is live. + bool findLiveInMBBs(unsigned Start, unsigned End, + SmallVectorImpl<MachineBasicBlock*> &MBBs) const; + + /// findReachableMBBs - Return a list MBB that can be reached via any + /// branch or fallthroughs. Return true if the list is not empty. + bool findReachableMBBs(unsigned Start, unsigned End, + SmallVectorImpl<MachineBasicBlock*> &MBBs) const; + + // Interval creation + + LiveInterval &getOrCreateInterval(unsigned reg) { + Reg2IntervalMap::iterator I = r2iMap_.find(reg); + if (I == r2iMap_.end()) + I = r2iMap_.insert(std::make_pair(reg, createInterval(reg))).first; + return *I->second; + } + + /// dupInterval - Duplicate a live interval. The caller is responsible for + /// managing the allocated memory. + LiveInterval *dupInterval(LiveInterval *li); + + /// addLiveRangeToEndOfBlock - Given a register and an instruction, + /// adds a live range from that instruction to the end of its MBB. + LiveRange addLiveRangeToEndOfBlock(unsigned reg, + MachineInstr* startInst); + + // Interval removal + + void removeInterval(unsigned Reg) { + DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.find(Reg); + delete I->second; + r2iMap_.erase(I); + } + + /// isNotInMIMap - returns true if the specified machine instr has been + /// removed or was never entered in the map. + bool isNotInMIMap(MachineInstr* instr) const { + return !mi2iMap_.count(instr); + } + + /// RemoveMachineInstrFromMaps - This marks the specified machine instr as + /// deleted. + void RemoveMachineInstrFromMaps(MachineInstr *MI) { + // remove index -> MachineInstr and + // MachineInstr -> index mappings + Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI); + if (mi2i != mi2iMap_.end()) { + i2miMap_[mi2i->second/InstrSlots::NUM] = 0; + mi2iMap_.erase(mi2i); + } + } + + /// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in + /// maps used by register allocator. + void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) { + Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI); + if (mi2i == mi2iMap_.end()) + return; + i2miMap_[mi2i->second/InstrSlots::NUM] = NewMI; + Mi2IndexMap::iterator it = mi2iMap_.find(MI); + assert(it != mi2iMap_.end() && "Invalid instruction!"); + unsigned Index = it->second; + mi2iMap_.erase(it); + mi2iMap_[NewMI] = Index; + } + + BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; } + + /// getVNInfoSourceReg - Helper function that parses the specified VNInfo + /// copy field and returns the source register that defines it. + unsigned getVNInfoSourceReg(const VNInfo *VNI) const; + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + virtual void releaseMemory(); + + /// runOnMachineFunction - pass entry point + virtual bool runOnMachineFunction(MachineFunction&); + + /// print - Implement the dump method. + virtual void print(std::ostream &O, const Module* = 0) const; + void print(std::ostream *O, const Module* M = 0) const { + if (O) print(*O, M); + } + + /// addIntervalsForSpills - Create new intervals for spilled defs / uses of + /// the given interval. FIXME: It also returns the weight of the spill slot + /// (if any is created) by reference. This is temporary. 
+ std::vector<LiveInterval*> + addIntervalsForSpills(const LiveInterval& i, + SmallVectorImpl<LiveInterval*> &SpillIs, + const MachineLoopInfo *loopInfo, VirtRegMap& vrm); + + /// addIntervalsForSpillsFast - Quickly create new intervals for spilled + /// defs / uses without remat or splitting. + std::vector<LiveInterval*> + addIntervalsForSpillsFast(const LiveInterval &li, + const MachineLoopInfo *loopInfo, VirtRegMap &vrm); + + /// spillPhysRegAroundRegDefsUses - Spill the specified physical register + /// around all defs and uses of the specified interval. Return true if it + /// was able to cut its interval. + bool spillPhysRegAroundRegDefsUses(const LiveInterval &li, + unsigned PhysReg, VirtRegMap &vrm); + + /// isReMaterializable - Returns true if every definition of MI of every + /// val# of the specified interval is re-materializable. Also returns true + /// by reference if all of the defs are load instructions. + bool isReMaterializable(const LiveInterval &li, + SmallVectorImpl<LiveInterval*> &SpillIs, + bool &isLoad); + + /// isReMaterializable - Returns true if the definition MI of the specified + /// val# of the specified interval is re-materializable. + bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo, + MachineInstr *MI); + + /// getRepresentativeReg - Find the largest super register of the specified + /// physical register. + unsigned getRepresentativeReg(unsigned Reg) const; + + /// getNumConflictsWithPhysReg - Return the number of uses and defs of the + /// specified interval that conflicts with the specified physical register. + unsigned getNumConflictsWithPhysReg(const LiveInterval &li, + unsigned PhysReg) const; + + /// computeNumbering - Compute the index numbering. + void computeNumbering(); + + /// scaleNumbering - Rescale interval numbers to introduce gaps for new + /// instructions + void scaleNumbering(int factor); + + /// intervalIsInOneMBB - Returns true if the specified interval is entirely + /// within a single basic block. + bool intervalIsInOneMBB(const LiveInterval &li) const; + + private: + /// computeIntervals - Compute live intervals. + void computeIntervals(); + + /// handleRegisterDef - update intervals for a register def + /// (calls handlePhysicalRegisterDef and + /// handleVirtualRegisterDef) + void handleRegisterDef(MachineBasicBlock *MBB, + MachineBasicBlock::iterator MI, unsigned MIIdx, + MachineOperand& MO, unsigned MOIdx); + + /// handleVirtualRegisterDef - update intervals for a virtual + /// register def + void handleVirtualRegisterDef(MachineBasicBlock *MBB, + MachineBasicBlock::iterator MI, + unsigned MIIdx, MachineOperand& MO, + unsigned MOIdx, LiveInterval& interval); + + /// handlePhysicalRegisterDef - update intervals for a physical register + /// def. + void handlePhysicalRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned MIIdx, MachineOperand& MO, + LiveInterval &interval, + MachineInstr *CopyMI); + + /// handleLiveInRegister - Create interval for a livein register. + void handleLiveInRegister(MachineBasicBlock* mbb, + unsigned MIIdx, + LiveInterval &interval, bool isAlias = false); + + /// getReMatImplicitUse - If the remat definition MI has one (for now, we + /// only allow one) virtual register operand, then its uses are implicitly + /// using the register. Returns the virtual register. 
+ unsigned getReMatImplicitUse(const LiveInterval &li, + MachineInstr *MI) const; + + /// isValNoAvailableAt - Return true if the val# of the specified interval + /// which reaches the given instruction also reaches the specified use + /// index. + bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI, + unsigned UseIdx) const; + + /// isReMaterializable - Returns true if the definition MI of the specified + /// val# of the specified interval is re-materializable. Also returns true + /// by reference if the def is a load. + bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo, + MachineInstr *MI, + SmallVectorImpl<LiveInterval*> &SpillIs, + bool &isLoad); + + /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from + /// slot / to reg or any rematerialized load into ith operand of specified + /// MI. If it is successul, MI is updated with the newly created MI and + /// returns true. + bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm, + MachineInstr *DefMI, unsigned InstrIdx, + SmallVector<unsigned, 2> &Ops, + bool isSS, int Slot, unsigned Reg); + + /// canFoldMemoryOperand - Return true if the specified load / store + /// folding is possible. + bool canFoldMemoryOperand(MachineInstr *MI, + SmallVector<unsigned, 2> &Ops, + bool ReMatLoadSS) const; + + /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified + /// VNInfo that's after the specified index but is within the basic block. + bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI, + MachineBasicBlock *MBB, unsigned Idx) const; + + /// hasAllocatableSuperReg - Return true if the specified physical register + /// has any super register that's allocatable. + bool hasAllocatableSuperReg(unsigned Reg) const; + + /// SRInfo - Spill / restore info. + struct SRInfo { + int index; + unsigned vreg; + bool canFold; + SRInfo(int i, unsigned vr, bool f) : index(i), vreg(vr), canFold(f) {}; + }; + + bool alsoFoldARestore(int Id, int index, unsigned vr, + BitVector &RestoreMBBs, + DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes); + void eraseRestoreInfo(int Id, int index, unsigned vr, + BitVector &RestoreMBBs, + DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes); + + /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being + /// spilled and create empty intervals for their uses. + void handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm, + const TargetRegisterClass* rc, + std::vector<LiveInterval*> &NewLIs); + + /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of + /// interval on to-be re-materialized operands of MI) with new register. + void rewriteImplicitOps(const LiveInterval &li, + MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm); + + /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper + /// functions for addIntervalsForSpills to rewrite uses / defs for the given + /// live range. 
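For orientation, the SRInfo records above are grouped per basic block: for each block number the implementation keeps the list of spill or restore points it has decided on. A sketch of the bookkeeping pattern these structures suggest (MBBNum, Idx and VReg are illustrative values; this helper is not part of the class):

    // Sketch (implementation-side): record that VReg wants a restore at
    // instruction index Idx in block number MBBNum, not foldable into a use.
    void noteRestorePoint(DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                          unsigned MBBNum, int Idx, unsigned VReg) {
      RestoreIdxes[MBBNum].push_back(SRInfo(Idx, VReg, /*canFold=*/false));
    }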
+ bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI, + bool TrySplit, unsigned index, unsigned end, MachineInstr *MI, + MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot, + bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, + VirtRegMap &vrm, const TargetRegisterClass* rc, + SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo, + unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse, + DenseMap<unsigned,unsigned> &MBBVRegsMap, + std::vector<LiveInterval*> &NewLIs); + void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, + LiveInterval::Ranges::const_iterator &I, + MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot, + bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, + VirtRegMap &vrm, const TargetRegisterClass* rc, + SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo, + BitVector &SpillMBBs, + DenseMap<unsigned,std::vector<SRInfo> > &SpillIdxes, + BitVector &RestoreMBBs, + DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes, + DenseMap<unsigned,unsigned> &MBBVRegsMap, + std::vector<LiveInterval*> &NewLIs); + + static LiveInterval* createInterval(unsigned Reg); + + void printRegName(unsigned reg) const; + }; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/LiveStackAnalysis.h b/include/llvm/CodeGen/LiveStackAnalysis.h new file mode 100644 index 0000000..27ae1be --- /dev/null +++ b/include/llvm/CodeGen/LiveStackAnalysis.h @@ -0,0 +1,112 @@ +//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the live stack slot analysis pass. It is analogous to +// live interval analysis except it's analyzing liveness of stack slots rather +// than registers. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVESTACK_ANALYSIS_H +#define LLVM_CODEGEN_LIVESTACK_ANALYSIS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/LiveInterval.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Support/Allocator.h" +#include <map> + +namespace llvm { + + class LiveStacks : public MachineFunctionPass { + /// Special pool allocator for VNInfo's (LiveInterval val#). + /// + BumpPtrAllocator VNInfoAllocator; + + /// S2IMap - Stack slot indices to live interval mapping. + /// + typedef std::map<int, LiveInterval> SS2IntervalMap; + SS2IntervalMap S2IMap; + + /// S2RCMap - Stack slot indices to register class mapping. 
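A short usage sketch for this interface: a stack-slot coloring style pass can create or look up the interval for a spill slot and recover the register class recorded for it. LS, Slot and RC are illustrative names; the methods are the ones declared just below.

    // Sketch: register a use of spill slot Slot with register class RC. Repeated
    // calls narrow the recorded class to the largest common subclass.
    static const TargetRegisterClass *
    noteSpillSlotUse(LiveStacks &LS, int Slot, const TargetRegisterClass *RC) {
      LiveInterval &SlotLI = LS.getOrCreateInterval(Slot, RC);
      (void)SlotLI;          // e.g. the slot's weight could be bumped here
      return LS.getIntervalRegClass(Slot);
    }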
+ std::map<int, const TargetRegisterClass*> S2RCMap; + + public: + static char ID; // Pass identification, replacement for typeid + LiveStacks() : MachineFunctionPass(&ID) {} + + typedef SS2IntervalMap::iterator iterator; + typedef SS2IntervalMap::const_iterator const_iterator; + const_iterator begin() const { return S2IMap.begin(); } + const_iterator end() const { return S2IMap.end(); } + iterator begin() { return S2IMap.begin(); } + iterator end() { return S2IMap.end(); } + + void scaleNumbering(int factor); + + unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); } + + LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC) { + assert(Slot >= 0 && "Spill slot indice must be >= 0"); + SS2IntervalMap::iterator I = S2IMap.find(Slot); + if (I == S2IMap.end()) { + I = S2IMap.insert(I,std::make_pair(Slot, LiveInterval(Slot,0.0F,true))); + S2RCMap.insert(std::make_pair(Slot, RC)); + } else { + // Use the largest common subclass register class. + const TargetRegisterClass *OldRC = S2RCMap[Slot]; + S2RCMap[Slot] = getCommonSubClass(OldRC, RC); + } + return I->second; + } + + LiveInterval &getInterval(int Slot) { + assert(Slot >= 0 && "Spill slot indice must be >= 0"); + SS2IntervalMap::iterator I = S2IMap.find(Slot); + assert(I != S2IMap.end() && "Interval does not exist for stack slot"); + return I->second; + } + + const LiveInterval &getInterval(int Slot) const { + assert(Slot >= 0 && "Spill slot indice must be >= 0"); + SS2IntervalMap::const_iterator I = S2IMap.find(Slot); + assert(I != S2IMap.end() && "Interval does not exist for stack slot"); + return I->second; + } + + bool hasInterval(int Slot) const { + return S2IMap.count(Slot); + } + + const TargetRegisterClass *getIntervalRegClass(int Slot) const { + assert(Slot >= 0 && "Spill slot indice must be >= 0"); + std::map<int, const TargetRegisterClass*>::const_iterator + I = S2RCMap.find(Slot); + assert(I != S2RCMap.end() && + "Register class info does not exist for stack slot"); + return I->second; + } + + BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + virtual void releaseMemory(); + + /// runOnMachineFunction - pass entry point + virtual bool runOnMachineFunction(MachineFunction&); + + /// print - Implement the dump method. + virtual void print(std::ostream &O, const Module* = 0) const; + void print(std::ostream *O, const Module* M = 0) const { + if (O) print(*O, M); + } + }; +} + +#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */ diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h new file mode 100644 index 0000000..26c0362 --- /dev/null +++ b/include/llvm/CodeGen/LiveVariables.h @@ -0,0 +1,269 @@ +//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveVariables analysis pass. For each machine +// instruction in the function, this pass calculates the set of registers that +// are immediately dead after the instruction (i.e., the instruction calculates +// the value, but it is never used) and the set of registers that are used by +// the instruction, but are never used after the instruction (i.e., they are +// killed). 
+// +// This class computes live variables using a sparse implementation based on +// the machine code SSA form. This class computes live variable information for +// each virtual and _register allocatable_ physical register in a function. It +// uses the dominance properties of SSA form to efficiently compute live +// variables for virtual registers, and assumes that physical registers are only +// live within a single basic block (allowing it to do a single local analysis +// to resolve physical register lifetimes in each basic block). If a physical +// register is not register allocatable, it is not tracked. This is useful for +// things like the stack pointer and condition codes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVEVARIABLES_H +#define LLVM_CODEGEN_LIVEVARIABLES_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/SparseBitVector.h" + +namespace llvm { + +class MachineRegisterInfo; +class TargetRegisterInfo; + +class LiveVariables : public MachineFunctionPass { +public: + static char ID; // Pass identification, replacement for typeid + LiveVariables() : MachineFunctionPass(&ID) {} + + /// VarInfo - This represents the regions where a virtual register is live in + /// the program. We represent this with three different pieces of + /// information: the set of blocks in which the instruction is live + /// throughout, the set of blocks in which the instruction is actually used, + /// and the set of non-phi instructions that are the last users of the value. + /// + /// In the common case where a value is defined and killed in the same block, + /// There is one killing instruction, and AliveBlocks is empty. + /// + /// Otherwise, the value is live out of the block. If the value is live + /// throughout any blocks, these blocks are listed in AliveBlocks. Blocks + /// where the liveness range ends are not included in AliveBlocks, instead + /// being captured by the Kills set. In these blocks, the value is live into + /// the block (unless the value is defined and killed in the same block) and + /// lives until the specified instruction. Note that there cannot ever be a + /// value whose Kills set contains two instructions from the same basic block. + /// + /// PHI nodes complicate things a bit. If a PHI node is the last user of a + /// value in one of its predecessor blocks, it is not listed in the kills set, + /// but does include the predecessor block in the AliveBlocks set (unless that + /// block also defines the value). This leads to the (perfectly sensical) + /// situation where a value is defined in a block, and the last use is a phi + /// node in the successor. In this case, AliveBlocks is empty (the value is + /// not live across any blocks) and Kills is empty (phi nodes are not + /// included). This is sensical because the value must be live to the end of + /// the block, but is not live in any successor blocks. + struct VarInfo { + /// AliveBlocks - Set of blocks in which this value is alive completely + /// through. This is a bit set which uses the basic block number as an + /// index. + /// + SparseBitVector<> AliveBlocks; + + /// NumUses - Number of uses of this register across the entire function. + /// + unsigned NumUses; + + /// Kills - List of MachineInstruction's which are the last use of this + /// virtual register (kill it) in their basic block. 
+ /// + std::vector<MachineInstr*> Kills; + + VarInfo() : NumUses(0) {} + + /// removeKill - Delete a kill corresponding to the specified + /// machine instruction. Returns true if there was a kill + /// corresponding to this instruction, false otherwise. + bool removeKill(MachineInstr *MI) { + std::vector<MachineInstr*>::iterator + I = std::find(Kills.begin(), Kills.end(), MI); + if (I == Kills.end()) + return false; + Kills.erase(I); + return true; + } + + void dump() const; + }; + +private: + /// VirtRegInfo - This list is a mapping from virtual register number to + /// variable information. FirstVirtualRegister is subtracted from the virtual + /// register number before indexing into this list. + /// + std::vector<VarInfo> VirtRegInfo; + + /// ReservedRegisters - This vector keeps track of which registers + /// are reserved register which are not allocatable by the target machine. + /// We can not track liveness for values that are in this set. + /// + BitVector ReservedRegisters; + +private: // Intermediate data structures + MachineFunction *MF; + + MachineRegisterInfo* MRI; + + const TargetRegisterInfo *TRI; + + // PhysRegInfo - Keep track of which instruction was the last def of a + // physical register. This is a purely local property, because all physical + // register references are presumed dead across basic blocks. + MachineInstr **PhysRegDef; + + // PhysRegInfo - Keep track of which instruction was the last use of a + // physical register. This is a purely local property, because all physical + // register references are presumed dead across basic blocks. + MachineInstr **PhysRegUse; + + SmallVector<unsigned, 4> *PHIVarInfo; + + // DistanceMap - Keep track the distance of a MI from the start of the + // current basic block. + DenseMap<MachineInstr*, unsigned> DistanceMap; + + /// HandlePhysRegKill - Add kills of Reg and its sub-registers to the + /// uses. Pay special attention to the sub-register uses which may come below + /// the last use of the whole register. + bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI); + + void HandlePhysRegUse(unsigned Reg, MachineInstr *MI); + void HandlePhysRegDef(unsigned Reg, MachineInstr *MI); + + /// FindLastPartialDef - Return the last partial def of the specified register. + /// Also returns the sub-register that's defined. + MachineInstr *FindLastPartialDef(unsigned Reg, unsigned &PartDefReg); + + /// hasRegisterUseBelow - Return true if the specified register is used after + /// the current instruction and before it's next definition. + bool hasRegisterUseBelow(unsigned Reg, MachineBasicBlock::iterator I, + MachineBasicBlock *MBB); + + /// analyzePHINodes - Gather information about the PHI nodes in here. In + /// particular, we want to map the variable information of a virtual + /// register which is used in a PHI node. We map that to the BB the vreg + /// is coming from. + void analyzePHINodes(const MachineFunction& Fn); +public: + + virtual bool runOnMachineFunction(MachineFunction &MF); + + /// RegisterDefIsDead - Return true if the specified instruction defines the + /// specified register, but that definition is dead. + bool RegisterDefIsDead(MachineInstr *MI, unsigned Reg) const; + + //===--------------------------------------------------------------------===// + // API to update live variable information + + /// replaceKillInstruction - Update register kill info by replacing a kill + /// instruction with a new one. 
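Putting the pieces above together, a client can recover both halves of the liveness picture for one virtual register: the blocks it lives straight through and the instructions that kill it. A sketch using VarInfo and the getVarInfo() accessor declared further below (LV and Reg are illustrative; the needed headers are assumed to be included):

    // Sketch: count live-through blocks and note the kill points of Reg.
    static unsigned countLiveThroughBlocks(LiveVariables &LV, unsigned Reg) {
      LiveVariables::VarInfo &VI = LV.getVarInfo(Reg);
      unsigned N = 0;
      for (SparseBitVector<>::iterator I = VI.AliveBlocks.begin(),
             E = VI.AliveBlocks.end(); I != E; ++I)
        ++N;                       // *I is the number of a live-through block
      // VI.Kills holds the last user of Reg in each block where its range ends.
      return N;
    }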
+ void replaceKillInstruction(unsigned Reg, MachineInstr *OldMI, + MachineInstr *NewMI); + + /// addVirtualRegisterKilled - Add information about the fact that the + /// specified register is killed after being used by the specified + /// instruction. If AddIfNotFound is true, add a implicit operand if it's + /// not found. + void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr *MI, + bool AddIfNotFound = false) { + if (MI->addRegisterKilled(IncomingReg, TRI, AddIfNotFound)) + getVarInfo(IncomingReg).Kills.push_back(MI); + } + + /// removeVirtualRegisterKilled - Remove the specified kill of the virtual + /// register from the live variable information. Returns true if the + /// variable was marked as killed by the specified instruction, + /// false otherwise. + bool removeVirtualRegisterKilled(unsigned reg, MachineInstr *MI) { + if (!getVarInfo(reg).removeKill(MI)) + return false; + + bool Removed = false; + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (MO.isReg() && MO.isKill() && MO.getReg() == reg) { + MO.setIsKill(false); + Removed = true; + break; + } + } + + assert(Removed && "Register is not used by this instruction!"); + return true; + } + + /// removeVirtualRegistersKilled - Remove all killed info for the specified + /// instruction. + void removeVirtualRegistersKilled(MachineInstr *MI); + + /// addVirtualRegisterDead - Add information about the fact that the specified + /// register is dead after being used by the specified instruction. If + /// AddIfNotFound is true, add a implicit operand if it's not found. + void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr *MI, + bool AddIfNotFound = false) { + if (MI->addRegisterDead(IncomingReg, TRI, AddIfNotFound)) + getVarInfo(IncomingReg).Kills.push_back(MI); + } + + /// removeVirtualRegisterDead - Remove the specified kill of the virtual + /// register from the live variable information. Returns true if the + /// variable was marked dead at the specified instruction, false + /// otherwise. + bool removeVirtualRegisterDead(unsigned reg, MachineInstr *MI) { + if (!getVarInfo(reg).removeKill(MI)) + return false; + + bool Removed = false; + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (MO.isReg() && MO.isDef() && MO.getReg() == reg) { + MO.setIsDead(false); + Removed = true; + break; + } + } + assert(Removed && "Register is not defined by this instruction!"); + return true; + } + + void getAnalysisUsage(AnalysisUsage &AU) const; + + virtual void releaseMemory() { + VirtRegInfo.clear(); + } + + /// getVarInfo - Return the VarInfo structure for the specified VIRTUAL + /// register. 
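When a transformation moves the last use of a register from one instruction to another, the kill record has to move with it. A sketch of that update using the helpers above (Reg, OldMI and NewMI are illustrative); replaceKillInstruction, declared earlier, covers the same common case in a single call:

    // Sketch: transfer the kill of Reg from OldMI to NewMI.
    static void moveKill(LiveVariables &LV, unsigned Reg,
                         MachineInstr *OldMI, MachineInstr *NewMI) {
      if (LV.removeVirtualRegisterKilled(Reg, OldMI))
        LV.addVirtualRegisterKilled(Reg, NewMI, /*AddIfNotFound=*/true);
    }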
+ VarInfo &getVarInfo(unsigned RegIdx); + + void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock, + MachineBasicBlock *BB); + void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock, + MachineBasicBlock *BB, + std::vector<MachineBasicBlock*> &WorkList); + void HandleVirtRegDef(unsigned reg, MachineInstr *MI); + void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB, + MachineInstr *MI); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachORelocation.h b/include/llvm/CodeGen/MachORelocation.h new file mode 100644 index 0000000..d4027cc --- /dev/null +++ b/include/llvm/CodeGen/MachORelocation.h @@ -0,0 +1,54 @@ +//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MachORelocation class. +// +//===----------------------------------------------------------------------===// + + +#ifndef LLVM_CODEGEN_MACHO_RELOCATION_H +#define LLVM_CODEGEN_MACHO_RELOCATION_H + +namespace llvm { + + /// MachORelocation - This struct contains information about each relocation + /// that needs to be emitted to the file. + /// see <mach-o/reloc.h> + class MachORelocation { + uint32_t r_address; // offset in the section to what is being relocated + uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index + bool r_pcrel; // was relocated pc-relative already + uint8_t r_length; // length = 2 ^ r_length + bool r_extern; // + uint8_t r_type; // if not 0, machine-specific relocation type. + bool r_scattered; // 1 = scattered, 0 = non-scattered + int32_t r_value; // the value the item to be relocated is referring + // to. + public: + uint32_t getPackedFields() const { + if (r_scattered) + return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) | + ((r_type & 15) << 24) | (r_address & 0x00FFFFFF); + else + return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) | + (r_extern << 4) | (r_type & 15); + } + uint32_t getAddress() const { return r_scattered ? r_value : r_address; } + uint32_t getRawAddress() const { return r_address; } + + MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len, + bool ext, uint8_t type, bool scattered = false, + int32_t value = 0) : + r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len), + r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {} + }; + +} // end llvm namespace + +#endif // LLVM_CODEGEN_MACHO_RELOCATION_H diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h new file mode 100644 index 0000000..134d226 --- /dev/null +++ b/include/llvm/CodeGen/MachineBasicBlock.h @@ -0,0 +1,414 @@ +//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Collect the sequence of machine instructions for a basic block. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H +#define LLVM_CODEGEN_MACHINEBASICBLOCK_H + +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/Support/Streams.h" + +namespace llvm { + +class BasicBlock; +class MachineFunction; + +template <> +struct ilist_traits<MachineInstr> : public ilist_default_traits<MachineInstr> { +private: + mutable ilist_node<MachineInstr> Sentinel; + + // this is only set by the MachineBasicBlock owning the LiveList + friend class MachineBasicBlock; + MachineBasicBlock* Parent; + +public: + MachineInstr *createSentinel() const { + return static_cast<MachineInstr*>(&Sentinel); + } + void destroySentinel(MachineInstr *) const {} + + MachineInstr *provideInitialHead() const { return createSentinel(); } + MachineInstr *ensureHead(MachineInstr*) const { return createSentinel(); } + static void noteHead(MachineInstr*, MachineInstr*) {} + + void addNodeToList(MachineInstr* N); + void removeNodeFromList(MachineInstr* N); + void transferNodesFromList(ilist_traits &SrcTraits, + ilist_iterator<MachineInstr> first, + ilist_iterator<MachineInstr> last); + void deleteNode(MachineInstr *N); +private: + void createNode(const MachineInstr &); +}; + +class MachineBasicBlock : public ilist_node<MachineBasicBlock> { + typedef ilist<MachineInstr> Instructions; + Instructions Insts; + const BasicBlock *BB; + int Number; + MachineFunction *xParent; + + /// Predecessors/Successors - Keep track of the predecessor / successor + /// basicblocks. + std::vector<MachineBasicBlock *> Predecessors; + std::vector<MachineBasicBlock *> Successors; + + /// LiveIns - Keep track of the physical registers that are livein of + /// the basicblock. + std::vector<unsigned> LiveIns; + + /// Alignment - Alignment of the basic block. Zero if the basic block does + /// not need to be aligned. + unsigned Alignment; + + /// IsLandingPad - Indicate that this basic block is entered via an + /// exception handler. + bool IsLandingPad; + + // Intrusive list support + MachineBasicBlock() {} + + explicit MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb); + + ~MachineBasicBlock(); + + // MachineBasicBlocks are allocated and owned by MachineFunction. + friend class MachineFunction; + +public: + /// getBasicBlock - Return the LLVM basic block that this instance + /// corresponded to originally. + /// + const BasicBlock *getBasicBlock() const { return BB; } + + /// getParent - Return the MachineFunction containing this basic block. 
+ /// + const MachineFunction *getParent() const { return xParent; } + MachineFunction *getParent() { return xParent; } + + typedef Instructions::iterator iterator; + typedef Instructions::const_iterator const_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + + unsigned size() const { return (unsigned)Insts.size(); } + bool empty() const { return Insts.empty(); } + + MachineInstr& front() { return Insts.front(); } + MachineInstr& back() { return Insts.back(); } + const MachineInstr& front() const { return Insts.front(); } + const MachineInstr& back() const { return Insts.back(); } + + iterator begin() { return Insts.begin(); } + const_iterator begin() const { return Insts.begin(); } + iterator end() { return Insts.end(); } + const_iterator end() const { return Insts.end(); } + reverse_iterator rbegin() { return Insts.rbegin(); } + const_reverse_iterator rbegin() const { return Insts.rbegin(); } + reverse_iterator rend () { return Insts.rend(); } + const_reverse_iterator rend () const { return Insts.rend(); } + + // Machine-CFG iterators + typedef std::vector<MachineBasicBlock *>::iterator pred_iterator; + typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator; + typedef std::vector<MachineBasicBlock *>::iterator succ_iterator; + typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator; + typedef std::vector<MachineBasicBlock *>::reverse_iterator + pred_reverse_iterator; + typedef std::vector<MachineBasicBlock *>::const_reverse_iterator + const_pred_reverse_iterator; + typedef std::vector<MachineBasicBlock *>::reverse_iterator + succ_reverse_iterator; + typedef std::vector<MachineBasicBlock *>::const_reverse_iterator + const_succ_reverse_iterator; + + pred_iterator pred_begin() { return Predecessors.begin(); } + const_pred_iterator pred_begin() const { return Predecessors.begin(); } + pred_iterator pred_end() { return Predecessors.end(); } + const_pred_iterator pred_end() const { return Predecessors.end(); } + pred_reverse_iterator pred_rbegin() + { return Predecessors.rbegin();} + const_pred_reverse_iterator pred_rbegin() const + { return Predecessors.rbegin();} + pred_reverse_iterator pred_rend() + { return Predecessors.rend(); } + const_pred_reverse_iterator pred_rend() const + { return Predecessors.rend(); } + unsigned pred_size() const { + return (unsigned)Predecessors.size(); + } + bool pred_empty() const { return Predecessors.empty(); } + succ_iterator succ_begin() { return Successors.begin(); } + const_succ_iterator succ_begin() const { return Successors.begin(); } + succ_iterator succ_end() { return Successors.end(); } + const_succ_iterator succ_end() const { return Successors.end(); } + succ_reverse_iterator succ_rbegin() + { return Successors.rbegin(); } + const_succ_reverse_iterator succ_rbegin() const + { return Successors.rbegin(); } + succ_reverse_iterator succ_rend() + { return Successors.rend(); } + const_succ_reverse_iterator succ_rend() const + { return Successors.rend(); } + unsigned succ_size() const { + return (unsigned)Successors.size(); + } + bool succ_empty() const { return Successors.empty(); } + + // LiveIn management methods. + + /// addLiveIn - Add the specified register as a live in. Note that it + /// is an error to add the same register to the same set more than once. + void addLiveIn(unsigned Reg) { LiveIns.push_back(Reg); } + + /// removeLiveIn - Remove the specified register from the live in set. 
+ /// + void removeLiveIn(unsigned Reg); + + /// isLiveIn - Return true if the specified register is in the live in set. + /// + bool isLiveIn(unsigned Reg) const; + + // Iteration support for live in sets. These sets are kept in sorted + // order by their register number. + typedef std::vector<unsigned>::iterator livein_iterator; + typedef std::vector<unsigned>::const_iterator const_livein_iterator; + livein_iterator livein_begin() { return LiveIns.begin(); } + const_livein_iterator livein_begin() const { return LiveIns.begin(); } + livein_iterator livein_end() { return LiveIns.end(); } + const_livein_iterator livein_end() const { return LiveIns.end(); } + bool livein_empty() const { return LiveIns.empty(); } + + /// getAlignment - Return alignment of the basic block. + /// + unsigned getAlignment() const { return Alignment; } + + /// setAlignment - Set alignment of the basic block. + /// + void setAlignment(unsigned Align) { Alignment = Align; } + + /// isLandingPad - Returns true if the block is a landing pad. That is + /// this basic block is entered via an exception handler. + bool isLandingPad() const { return IsLandingPad; } + + /// setIsLandingPad - Indicates the block is a landing pad. That is + /// this basic block is entered via an exception handler. + void setIsLandingPad() { IsLandingPad = true; } + + // Code Layout methods. + + /// moveBefore/moveAfter - move 'this' block before or after the specified + /// block. This only moves the block, it does not modify the CFG or adjust + /// potential fall-throughs at the end of the block. + void moveBefore(MachineBasicBlock *NewAfter); + void moveAfter(MachineBasicBlock *NewBefore); + + // Machine-CFG mutators + + /// addSuccessor - Add succ as a successor of this MachineBasicBlock. + /// The Predecessors list of succ is automatically updated. + /// + void addSuccessor(MachineBasicBlock *succ); + + /// removeSuccessor - Remove successor from the successors list of this + /// MachineBasicBlock. The Predecessors list of succ is automatically updated. + /// + void removeSuccessor(MachineBasicBlock *succ); + + /// removeSuccessor - Remove specified successor from the successors list of + /// this MachineBasicBlock. The Predecessors list of succ is automatically + /// updated. Return the iterator to the element after the one removed. + /// + succ_iterator removeSuccessor(succ_iterator I); + + /// transferSuccessors - Transfers all the successors from MBB to this + /// machine basic block (i.e., copies all the successors fromMBB and + /// remove all the successors fromBB). + void transferSuccessors(MachineBasicBlock *fromMBB); + + /// isSuccessor - Return true if the specified MBB is a successor of this + /// block. + bool isSuccessor(const MachineBasicBlock *MBB) const; + + /// isLayoutSuccessor - Return true if the specified MBB will be emitted + /// immediately after this block, such that if this block exits by + /// falling through, control will transfer to the specified MBB. Note + /// that MBB need not be a successor at all, for example if this block + /// ends with an unconditional branch to some other block. + bool isLayoutSuccessor(const MachineBasicBlock *MBB) const; + + /// getFirstTerminator - returns an iterator to the first terminator + /// instruction of this basic block. 
If a terminator does not exist,
+  /// it returns end().
+  iterator getFirstTerminator();
+
+  /// isOnlyReachableByFallthrough - Return true if this basic block has
+  /// exactly one predecessor and the control transfer mechanism between
+  /// the predecessor and this block is a fall-through.
+  bool isOnlyReachableByFallthrough() const;
+
+  void pop_front() { Insts.pop_front(); }
+  void pop_back() { Insts.pop_back(); }
+  void push_back(MachineInstr *MI) { Insts.push_back(MI); }
+  template<typename IT>
+  void insert(iterator I, IT S, IT E) { Insts.insert(I, S, E); }
+  iterator insert(iterator I, MachineInstr *M) { return Insts.insert(I, M); }
+
+  // erase - Remove the specified element or range from the instruction list.
+  // These functions delete any instructions removed.
+  //
+  iterator erase(iterator I) { return Insts.erase(I); }
+  iterator erase(iterator I, iterator E) { return Insts.erase(I, E); }
+  MachineInstr *remove(MachineInstr *I) { return Insts.remove(I); }
+  void clear() { Insts.clear(); }
+
+  /// splice - Take an instruction from MBB 'Other' at the position From,
+  /// and insert it into this MBB right before 'where'.
+  void splice(iterator where, MachineBasicBlock *Other, iterator From) {
+    Insts.splice(where, Other->Insts, From);
+  }
+
+  /// splice - Take a block of instructions from MBB 'Other' in the range [From,
+  /// To), and insert them into this MBB right before 'where'.
+  void splice(iterator where, MachineBasicBlock *Other, iterator From,
+              iterator To) {
+    Insts.splice(where, Other->Insts, From, To);
+  }
+
+  /// removeFromParent - This method unlinks 'this' from the containing
+  /// function, and returns it, but does not delete it.
+  MachineBasicBlock *removeFromParent();
+
+  /// eraseFromParent - This method unlinks 'this' from the containing
+  /// function and deletes it.
+  void eraseFromParent();
+
+  /// ReplaceUsesOfBlockWith - Given a machine basic block that branched to
+  /// 'Old', change the code and CFG so that it branches to 'New' instead.
+  void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// CorrectExtraCFGEdges - Various pieces of code can cause excess edges in
+  /// the CFG to be inserted. If we have proven that MBB can only branch to
+  /// DestA and DestB, remove any other MBB successors from the CFG. DestA and
+  /// DestB can be null. Besides DestA and DestB, retain other edges leading
+  /// to LandingPads (currently there can be only one; we don't check or require
+  /// that here). Note it is possible that DestA and/or DestB are LandingPads.
+  bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
+                            MachineBasicBlock *DestB,
+                            bool isCond);
+
+  // Debugging methods.
+  void dump() const;
+  void print(std::ostream &OS) const;
+  void print(std::ostream *OS) const { if (OS) print(*OS); }
+
+  /// getNumber - MachineBasicBlocks are uniquely numbered at the function
+  /// level, unless they're not in a MachineFunction yet, in which case this
+  /// will return -1.
+  ///
+  int getNumber() const { return Number; }
+  void setNumber(int N) { Number = N; }
+
+private:   // Methods used to maintain doubly linked list of blocks...
+  friend struct ilist_traits<MachineBasicBlock>;
+
+  // Machine-CFG mutators
+
+  /// addPredecessor - Add pred as a predecessor of this MachineBasicBlock.
+  /// Don't do this unless you know what you're doing, because it doesn't
+  /// update pred's successors list. Use pred->addSuccessor instead.
+ /// + void addPredecessor(MachineBasicBlock *pred); + + /// removePredecessor - Remove pred as a predecessor of this + /// MachineBasicBlock. Don't do this unless you know what you're + /// doing, because it doesn't update pred's successors list. Use + /// pred->removeSuccessor instead. + /// + void removePredecessor(MachineBasicBlock *pred); +}; + +std::ostream& operator<<(std::ostream &OS, const MachineBasicBlock &MBB); + +//===--------------------------------------------------------------------===// +// GraphTraits specializations for machine basic block graphs (machine-CFGs) +//===--------------------------------------------------------------------===// + +// Provide specializations of GraphTraits to be able to treat a +// MachineFunction as a graph of MachineBasicBlocks... +// + +template <> struct GraphTraits<MachineBasicBlock *> { + typedef MachineBasicBlock NodeType; + typedef MachineBasicBlock::succ_iterator ChildIteratorType; + + static NodeType *getEntryNode(MachineBasicBlock *BB) { return BB; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->succ_begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->succ_end(); + } +}; + +template <> struct GraphTraits<const MachineBasicBlock *> { + typedef const MachineBasicBlock NodeType; + typedef MachineBasicBlock::const_succ_iterator ChildIteratorType; + + static NodeType *getEntryNode(const MachineBasicBlock *BB) { return BB; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->succ_begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->succ_end(); + } +}; + +// Provide specializations of GraphTraits to be able to treat a +// MachineFunction as a graph of MachineBasicBlocks... and to walk it +// in inverse order. Inverse order for a function is considered +// to be when traversing the predecessor edges of a MBB +// instead of the successor edges. +// +template <> struct GraphTraits<Inverse<MachineBasicBlock*> > { + typedef MachineBasicBlock NodeType; + typedef MachineBasicBlock::pred_iterator ChildIteratorType; + static NodeType *getEntryNode(Inverse<MachineBasicBlock *> G) { + return G.Graph; + } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->pred_begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->pred_end(); + } +}; + +template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > { + typedef const MachineBasicBlock NodeType; + typedef MachineBasicBlock::const_pred_iterator ChildIteratorType; + static NodeType *getEntryNode(Inverse<const MachineBasicBlock*> G) { + return G.Graph; + } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->pred_begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->pred_end(); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineCodeEmitter.h b/include/llvm/CodeGen/MachineCodeEmitter.h new file mode 100644 index 0000000..aaa41a4 --- /dev/null +++ b/include/llvm/CodeGen/MachineCodeEmitter.h @@ -0,0 +1,330 @@ +//===-- llvm/CodeGen/MachineCodeEmitter.h - Code emission -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an abstract interface that is used by the machine code +// emission framework to output the code. 
This allows machine code emission to +// be separated from concerns such as resolution of call targets, and where the +// machine code will be written (memory or disk, f.e.). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINECODEEMITTER_H +#define LLVM_CODEGEN_MACHINECODEEMITTER_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class MachineBasicBlock; +class MachineConstantPool; +class MachineJumpTableInfo; +class MachineFunction; +class MachineModuleInfo; +class MachineRelocation; +class Value; +class GlobalValue; +class Function; + +/// MachineCodeEmitter - This class defines two sorts of methods: those for +/// emitting the actual bytes of machine code, and those for emitting auxillary +/// structures, such as jump tables, relocations, etc. +/// +/// Emission of machine code is complicated by the fact that we don't (in +/// general) know the size of the machine code that we're about to emit before +/// we emit it. As such, we preallocate a certain amount of memory, and set the +/// BufferBegin/BufferEnd pointers to the start and end of the buffer. As we +/// emit machine instructions, we advance the CurBufferPtr to indicate the +/// location of the next byte to emit. In the case of a buffer overflow (we +/// need to emit more machine code than we have allocated space for), the +/// CurBufferPtr will saturate to BufferEnd and ignore stores. Once the entire +/// function has been emitted, the overflow condition is checked, and if it has +/// occurred, more memory is allocated, and we reemit the code into it. +/// +class MachineCodeEmitter { +protected: + /// BufferBegin/BufferEnd - Pointers to the start and end of the memory + /// allocated for this code buffer. + uint8_t *BufferBegin, *BufferEnd; + + /// CurBufferPtr - Pointer to the next byte of memory to fill when emitting + /// code. This is guranteed to be in the range [BufferBegin,BufferEnd]. If + /// this pointer is at BufferEnd, it will never move due to code emission, and + /// all code emission requests will be ignored (this is the buffer overflow + /// condition). + uint8_t *CurBufferPtr; + +public: + virtual ~MachineCodeEmitter() {} + + /// startFunction - This callback is invoked when the specified function is + /// about to be code generated. This initializes the BufferBegin/End/Ptr + /// fields. + /// + virtual void startFunction(MachineFunction &F) = 0; + + /// finishFunction - This callback is invoked when the specified function has + /// finished code generation. If a buffer overflow has occurred, this method + /// returns true (the callee is required to try again), otherwise it returns + /// false. + /// + virtual bool finishFunction(MachineFunction &F) = 0; + + /// startGVStub - This callback is invoked when the JIT needs the + /// address of a GV (e.g. function) that has not been code generated yet. + /// The StubSize specifies the total size required by the stub. + /// + virtual void startGVStub(const GlobalValue* GV, unsigned StubSize, + unsigned Alignment = 1) = 0; + + /// startGVStub - This callback is invoked when the JIT needs the address of a + /// GV (e.g. function) that has not been code generated yet. Buffer points to + /// memory already allocated for this stub. + /// + virtual void startGVStub(const GlobalValue* GV, void *Buffer, + unsigned StubSize) = 0; + + /// finishGVStub - This callback is invoked to terminate a GV stub. 
+ /// + virtual void *finishGVStub(const GlobalValue* F) = 0; + + /// emitByte - This callback is invoked when a byte needs to be written to the + /// output stream. + /// + void emitByte(uint8_t B) { + if (CurBufferPtr != BufferEnd) + *CurBufferPtr++ = B; + } + + /// emitWordLE - This callback is invoked when a 32-bit word needs to be + /// written to the output stream in little-endian format. + /// + void emitWordLE(unsigned W) { + if (4 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 0); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 24); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitWordBE - This callback is invoked when a 32-bit word needs to be + /// written to the output stream in big-endian format. + /// + void emitWordBE(unsigned W) { + if (4 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 24); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 0); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitDWordLE - This callback is invoked when a 64-bit word needs to be + /// written to the output stream in little-endian format. + /// + void emitDWordLE(uint64_t W) { + if (8 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 0); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 24); + *CurBufferPtr++ = (uint8_t)(W >> 32); + *CurBufferPtr++ = (uint8_t)(W >> 40); + *CurBufferPtr++ = (uint8_t)(W >> 48); + *CurBufferPtr++ = (uint8_t)(W >> 56); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitDWordBE - This callback is invoked when a 64-bit word needs to be + /// written to the output stream in big-endian format. + /// + void emitDWordBE(uint64_t W) { + if (8 <= BufferEnd-CurBufferPtr) { + *CurBufferPtr++ = (uint8_t)(W >> 56); + *CurBufferPtr++ = (uint8_t)(W >> 48); + *CurBufferPtr++ = (uint8_t)(W >> 40); + *CurBufferPtr++ = (uint8_t)(W >> 32); + *CurBufferPtr++ = (uint8_t)(W >> 24); + *CurBufferPtr++ = (uint8_t)(W >> 16); + *CurBufferPtr++ = (uint8_t)(W >> 8); + *CurBufferPtr++ = (uint8_t)(W >> 0); + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitAlignment - Move the CurBufferPtr pointer up the the specified + /// alignment (saturated to BufferEnd of course). + void emitAlignment(unsigned Alignment) { + if (Alignment == 0) Alignment = 1; + + if(Alignment <= (uintptr_t)(BufferEnd-CurBufferPtr)) { + // Move the current buffer ptr up to the specified alignment. + CurBufferPtr = + (uint8_t*)(((uintptr_t)CurBufferPtr+Alignment-1) & + ~(uintptr_t)(Alignment-1)); + } else { + CurBufferPtr = BufferEnd; + } + } + + + /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be + /// written to the output stream. + void emitULEB128Bytes(unsigned Value) { + do { + uint8_t Byte = Value & 0x7f; + Value >>= 7; + if (Value) Byte |= 0x80; + emitByte(Byte); + } while (Value); + } + + /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be + /// written to the output stream. + void emitSLEB128Bytes(int32_t Value) { + int32_t Sign = Value >> (8 * sizeof(Value) - 1); + bool IsMore; + + do { + uint8_t Byte = Value & 0x7f; + Value >>= 7; + IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0; + if (IsMore) Byte |= 0x80; + emitByte(Byte); + } while (IsMore); + } + + /// emitString - This callback is invoked when a String needs to be + /// written to the output stream. 
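A worked example of the two LEB128 encoders above: the continuation bit (0x80) marks every byte except the last, and the payload travels low-order seven bits first. MCE stands for any MachineCodeEmitter and is purely illustrative.

    static void lebExample(MachineCodeEmitter &MCE) {
      // emitULEB128Bytes(624485) emits, in order:
      //   0xE5   (bits 0-6   = 0x65, continuation bit set)
      //   0x8E   (bits 7-13  = 0x0E, continuation bit set)
      //   0x26   (bits 14-20 = 0x26, continuation bit clear)
      MCE.emitULEB128Bytes(624485);
      // emitSLEB128Bytes(-2) emits the single byte 0x7E: sign-extending bit 6
      // already reproduces the value, so no continuation byte is needed.
      MCE.emitSLEB128Bytes(-2);
    }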
+ void emitString(const std::string &String) { + for (unsigned i = 0, N = static_cast<unsigned>(String.size()); + i < N; ++i) { + uint8_t C = String[i]; + emitByte(C); + } + emitByte(0); + } + + /// emitInt32 - Emit a int32 directive. + void emitInt32(int32_t Value) { + if (4 <= BufferEnd-CurBufferPtr) { + *((uint32_t*)CurBufferPtr) = Value; + CurBufferPtr += 4; + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitInt64 - Emit a int64 directive. + void emitInt64(uint64_t Value) { + if (8 <= BufferEnd-CurBufferPtr) { + *((uint64_t*)CurBufferPtr) = Value; + CurBufferPtr += 8; + } else { + CurBufferPtr = BufferEnd; + } + } + + /// emitInt32At - Emit the Int32 Value in Addr. + void emitInt32At(uintptr_t *Addr, uintptr_t Value) { + if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd) + (*(uint32_t*)Addr) = (uint32_t)Value; + } + + /// emitInt64At - Emit the Int64 Value in Addr. + void emitInt64At(uintptr_t *Addr, uintptr_t Value) { + if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd) + (*(uint64_t*)Addr) = (uint64_t)Value; + } + + + /// emitLabel - Emits a label + virtual void emitLabel(uint64_t LabelID) = 0; + + /// allocateSpace - Allocate a block of space in the current output buffer, + /// returning null (and setting conditions to indicate buffer overflow) on + /// failure. Alignment is the alignment in bytes of the buffer desired. + virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) { + emitAlignment(Alignment); + void *Result; + + // Check for buffer overflow. + if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) { + CurBufferPtr = BufferEnd; + Result = 0; + } else { + // Allocate the space. + Result = CurBufferPtr; + CurBufferPtr += Size; + } + + return Result; + } + + /// StartMachineBasicBlock - This should be called by the target when a new + /// basic block is about to be emitted. This way the MCE knows where the + /// start of the block is, and can implement getMachineBasicBlockAddress. + virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) = 0; + + /// getCurrentPCValue - This returns the address that the next emitted byte + /// will be output to. + /// + virtual uintptr_t getCurrentPCValue() const { + return (uintptr_t)CurBufferPtr; + } + + /// getCurrentPCOffset - Return the offset from the start of the emitted + /// buffer that we are currently writing to. + uintptr_t getCurrentPCOffset() const { + return CurBufferPtr-BufferBegin; + } + + /// addRelocation - Whenever a relocatable address is needed, it should be + /// noted with this interface. + virtual void addRelocation(const MachineRelocation &MR) = 0; + + + /// FIXME: These should all be handled with relocations! + + /// getConstantPoolEntryAddress - Return the address of the 'Index' entry in + /// the constant pool that was last emitted with the emitConstantPool method. + /// + virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const = 0; + + /// getJumpTableEntryAddress - Return the address of the jump table with index + /// 'Index' in the function that last called initJumpTableInfo. + /// + virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const = 0; + + /// getMachineBasicBlockAddress - Return the address of the specified + /// MachineBasicBlock, only usable after the label for the MBB has been + /// emitted. + /// + virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0; + + /// getLabelAddress - Return the address of the specified LabelID, only usable + /// after the LabelID has been emitted. 
+ /// + virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0; + + /// Specifies the MachineModuleInfo object. This is used for exception handling + /// purposes. + virtual void setModuleInfo(MachineModuleInfo* Info) = 0; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineCodeInfo.h b/include/llvm/CodeGen/MachineCodeInfo.h new file mode 100644 index 0000000..024e602 --- /dev/null +++ b/include/llvm/CodeGen/MachineCodeInfo.h @@ -0,0 +1,51 @@ +//===-- MachineCodeInfo.h - Class used to report JIT info -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines MachineCodeInfo, a class used by the JIT ExecutionEngine +// to report information about the generated machine code. +// +// See JIT::runJITOnFunction for usage. +// +//===----------------------------------------------------------------------===// + +#ifndef EE_MACHINE_CODE_INFO_H +#define EE_MACHINE_CODE_INFO_H + +namespace llvm { + +class MachineCodeInfo { +private: + size_t Size; // Number of bytes in memory used + void *Address; // The address of the function in memory + +public: + MachineCodeInfo() : Size(0), Address(0) {} + + void setSize(size_t s) { + Size = s; + } + + void setAddress(void *a) { + Address = a; + } + + size_t size() const { + return Size; + } + + void *address() const { + return Address; + } + +}; + +} + +#endif + diff --git a/include/llvm/CodeGen/MachineConstantPool.h b/include/llvm/CodeGen/MachineConstantPool.h new file mode 100644 index 0000000..99996cf --- /dev/null +++ b/include/llvm/CodeGen/MachineConstantPool.h @@ -0,0 +1,147 @@ +//===-- CodeGen/MachineConstantPool.h - Abstract Constant Pool --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// @file +/// This file declares the MachineConstantPool class which is an abstract +/// constant pool to keep track of constants referenced by a function. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H +#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H + +#include <cassert> +#include <climits> +#include <vector> + +namespace llvm { + +class Constant; +class FoldingSetNodeID; +class TargetData; +class TargetMachine; +class Type; +class MachineConstantPool; +class raw_ostream; + +/// Abstract base class for all machine specific constantpool value subclasses. +/// +class MachineConstantPoolValue { + const Type *Ty; + +public: + explicit MachineConstantPoolValue(const Type *ty) : Ty(ty) {} + virtual ~MachineConstantPoolValue() {} + + /// getType - get type of this MachineConstantPoolValue. + /// + inline const Type *getType() const { return Ty; } + + virtual int getExistingMachineCPValue(MachineConstantPool *CP, + unsigned Alignment) = 0; + + virtual void AddSelectionDAGCSEId(FoldingSetNodeID &ID) = 0; + + /// print - Implement operator<< + virtual void print(raw_ostream &O) const = 0; +}; + +inline raw_ostream &operator<<(raw_ostream &OS, + const MachineConstantPoolValue &V) { + V.print(OS); + return OS; +} + + +/// This class is a data container for one entry in a MachineConstantPool. 
+/// It contains a pointer to the value and an offset from the start of +/// the constant pool. +/// @brief An entry in a MachineConstantPool +class MachineConstantPoolEntry { +public: + /// The constant itself. + union { + Constant *ConstVal; + MachineConstantPoolValue *MachineCPVal; + } Val; + + /// The required alignment for this entry. The top bit is set when Val is + /// a MachineConstantPoolValue. + unsigned Alignment; + + MachineConstantPoolEntry(Constant *V, unsigned A) + : Alignment(A) { + Val.ConstVal = V; + } + MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A) + : Alignment(A) { + Val.MachineCPVal = V; + Alignment |= 1 << (sizeof(unsigned)*CHAR_BIT-1); + } + + bool isMachineConstantPoolEntry() const { + return (int)Alignment < 0; + } + + int getAlignment() const { + return Alignment & ~(1 << (sizeof(unsigned)*CHAR_BIT-1)); + } + + const Type *getType() const; +}; + +/// The MachineConstantPool class keeps track of constants referenced by a +/// function which must be spilled to memory. This is used for constants which +/// are unable to be used directly as operands to instructions, which typically +/// include floating point and large integer constants. +/// +/// Instructions reference the address of these constant pool constants through +/// the use of MO_ConstantPoolIndex values. When emitting assembly or machine +/// code, these virtual address references are converted to refer to the +/// address of the function constant pool values. +/// @brief The machine constant pool. +class MachineConstantPool { + const TargetData *TD; ///< The machine's TargetData. + unsigned PoolAlignment; ///< The alignment for the pool. + std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants. +public: + /// @brief The only constructor. + explicit MachineConstantPool(const TargetData *td) + : TD(td), PoolAlignment(1) {} + ~MachineConstantPool(); + + /// getConstantPoolAlignment - Return the the alignment required by + /// the whole constant pool, of which the first element must be aligned. + unsigned getConstantPoolAlignment() const { return PoolAlignment; } + + /// getConstantPoolIndex - Create a new entry in the constant pool or return + /// an existing one. User must specify the minimum required alignment for + /// the object. + unsigned getConstantPoolIndex(Constant *C, unsigned Alignment); + unsigned getConstantPoolIndex(MachineConstantPoolValue *V,unsigned Alignment); + + /// isEmpty - Return true if this constant pool contains no constants. + bool isEmpty() const { return Constants.empty(); } + + const std::vector<MachineConstantPoolEntry> &getConstants() const { + return Constants; + } + + /// print - Used by the MachineFunction printer to print information about + /// constant pool objects. Implemented in MachineFunction.cpp + /// + void print(raw_ostream &OS) const; + + /// dump - Call print(cerr) to be called from the debugger. + void dump() const; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineDominators.h b/include/llvm/CodeGen/MachineDominators.h new file mode 100644 index 0000000..5981e5a --- /dev/null +++ b/include/llvm/CodeGen/MachineDominators.h @@ -0,0 +1,199 @@ +//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines classes mirroring those in llvm/Analysis/Dominators.h, +// but for target-specific code rather than target-independent IR. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H +#define LLVM_CODEGEN_MACHINEDOMINATORS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/DominatorInternals.h" + +namespace llvm { + +inline void WriteAsOperand(std::ostream &, const MachineBasicBlock*, bool t) { } + +template<> +inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB) { + this->Roots.push_back(MBB); +} + +EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>); +EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>); + +typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode; + +//===------------------------------------- +/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to +/// compute a normal dominator tree. +/// +class MachineDominatorTree : public MachineFunctionPass { +public: + static char ID; // Pass ID, replacement for typeid + DominatorTreeBase<MachineBasicBlock>* DT; + + MachineDominatorTree(); + + ~MachineDominatorTree(); + + DominatorTreeBase<MachineBasicBlock>& getBase() { return *DT; } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + /// getRoots - Return the root blocks of the current CFG. This may include + /// multiple blocks if we are computing post dominators. For forward + /// dominators, this will always be a single block (the entry node). + /// + inline const std::vector<MachineBasicBlock*> &getRoots() const { + return DT->getRoots(); + } + + inline MachineBasicBlock *getRoot() const { + return DT->getRoot(); + } + + inline MachineDomTreeNode *getRootNode() const { + return DT->getRootNode(); + } + + virtual bool runOnMachineFunction(MachineFunction &F); + + inline bool dominates(MachineDomTreeNode* A, MachineDomTreeNode* B) const { + return DT->dominates(A, B); + } + + inline bool dominates(MachineBasicBlock* A, MachineBasicBlock* B) const { + return DT->dominates(A, B); + } + + // dominates - Return true if A dominates B. This performs the + // special checks necessary if A and B are in the same basic block. + bool dominates(MachineInstr *A, MachineInstr *B) const { + MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent(); + if (BBA != BBB) return DT->dominates(BBA, BBB); + + // Loop through the basic block until we find A or B. + MachineBasicBlock::iterator I = BBA->begin(); + for (; &*I != A && &*I != B; ++I) /*empty*/; + + //if(!DT.IsPostDominators) { + // A dominates B if it is found first in the basic block. + return &*I == A; + //} else { + // // A post-dominates B if B is found first in the basic block. + // return &*I == B; + //} + } + + inline bool properlyDominates(const MachineDomTreeNode* A, + MachineDomTreeNode* B) const { + return DT->properlyDominates(A, B); + } + + inline bool properlyDominates(MachineBasicBlock* A, + MachineBasicBlock* B) const { + return DT->properlyDominates(A, B); + } + + /// findNearestCommonDominator - Find nearest common dominator basic block + /// for basic block A and B. If there is no such block then return NULL. 
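The MachineInstr overload above is the one most clients want, since it also resolves ordering within a single block. A sketch of the typical query it answers (MDT, DefMI and UseMI are illustrative names):

    // Sketch: a value computed by DefMI may be used at UseMI only if the
    // definition dominates the use.
    static bool defDominatesUse(MachineDominatorTree &MDT,
                                MachineInstr *DefMI, MachineInstr *UseMI) {
      return MDT.dominates(DefMI, UseMI);
    }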
+ inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A, + MachineBasicBlock *B) { + return DT->findNearestCommonDominator(A, B); + } + + inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const { + return DT->getNode(BB); + } + + /// getNode - return the (Post)DominatorTree node for the specified basic + /// block. This is the same as using operator[] on this class. + /// + inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const { + return DT->getNode(BB); + } + + /// addNewBlock - Add a new node to the dominator tree information. This + /// creates a new node as a child of DomBB dominator node,linking it into + /// the children list of the immediate dominator. + inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB, + MachineBasicBlock *DomBB) { + return DT->addNewBlock(BB, DomBB); + } + + /// changeImmediateDominator - This method is used to update the dominator + /// tree information when a node's immediate dominator changes. + /// + inline void changeImmediateDominator(MachineBasicBlock *N, + MachineBasicBlock* NewIDom) { + DT->changeImmediateDominator(N, NewIDom); + } + + inline void changeImmediateDominator(MachineDomTreeNode *N, + MachineDomTreeNode* NewIDom) { + DT->changeImmediateDominator(N, NewIDom); + } + + /// eraseNode - Removes a node from the dominator tree. Block must not + /// domiante any other blocks. Removes node from its immediate dominator's + /// children list. Deletes dominator node associated with basic block BB. + inline void eraseNode(MachineBasicBlock *BB) { + DT->eraseNode(BB); + } + + /// splitBlock - BB is split and now it has one successor. Update dominator + /// tree to reflect this change. + inline void splitBlock(MachineBasicBlock* NewBB) { + DT->splitBlock(NewBB); + } + + + virtual void releaseMemory(); + + virtual void print(std::ostream &OS, const Module* M= 0) const { + DT->print(OS, M); + } +}; + +//===------------------------------------- +/// DominatorTree GraphTraits specialization so the DominatorTree can be +/// iterable by generic graph iterators. +/// + +template<class T> struct GraphTraits; + +template <> struct GraphTraits<MachineDomTreeNode *> { + typedef MachineDomTreeNode NodeType; + typedef NodeType::iterator ChildIteratorType; + + static NodeType *getEntryNode(NodeType *N) { + return N; + } + static inline ChildIteratorType child_begin(NodeType* N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType* N) { + return N->end(); + } +}; + +template <> struct GraphTraits<MachineDominatorTree*> + : public GraphTraits<MachineDomTreeNode *> { + static NodeType *getEntryNode(MachineDominatorTree *DT) { + return DT->getRootNode(); + } +}; + +} + +#endif diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h new file mode 100644 index 0000000..4c981f7 --- /dev/null +++ b/include/llvm/CodeGen/MachineFrameInfo.h @@ -0,0 +1,411 @@ +//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The file defines the MachineFrameInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H +#define LLVM_CODEGEN_MACHINEFRAMEINFO_H + +#include "llvm/Support/DataTypes.h" +#include <cassert> +#include <iosfwd> +#include <vector> + +namespace llvm { +class TargetData; +class TargetRegisterClass; +class Type; +class MachineModuleInfo; +class MachineFunction; +class TargetFrameInfo; + +/// The CalleeSavedInfo class tracks the information need to locate where a +/// callee saved register in the current frame. +class CalleeSavedInfo { + +private: + unsigned Reg; + const TargetRegisterClass *RegClass; + int FrameIdx; + +public: + CalleeSavedInfo(unsigned R, const TargetRegisterClass *RC, int FI = 0) + : Reg(R) + , RegClass(RC) + , FrameIdx(FI) + {} + + // Accessors. + unsigned getReg() const { return Reg; } + const TargetRegisterClass *getRegClass() const { return RegClass; } + int getFrameIdx() const { return FrameIdx; } + void setFrameIdx(int FI) { FrameIdx = FI; } +}; + +/// The MachineFrameInfo class represents an abstract stack frame until +/// prolog/epilog code is inserted. This class is key to allowing stack frame +/// representation optimizations, such as frame pointer elimination. It also +/// allows more mundane (but still important) optimizations, such as reordering +/// of abstract objects on the stack frame. +/// +/// To support this, the class assigns unique integer identifiers to stack +/// objects requested clients. These identifiers are negative integers for +/// fixed stack objects (such as arguments passed on the stack) or nonnegative +/// for objects that may be reordered. Instructions which refer to stack +/// objects use a special MO_FrameIndex operand to represent these frame +/// indexes. +/// +/// Because this class keeps track of all references to the stack frame, it +/// knows when a variable sized object is allocated on the stack. This is the +/// sole condition which prevents frame pointer elimination, which is an +/// important optimization on register-poor architectures. Because original +/// variable sized alloca's in the source program are the only source of +/// variable sized stack objects, it is safe to decide whether there will be +/// any variable sized objects before all stack objects are known (for +/// example, register allocator spill code never needs variable sized +/// objects). +/// +/// When prolog/epilog code emission is performed, the final stack frame is +/// built and the machine instructions are modified to refer to the actual +/// stack offsets of the object, eliminating all MO_FrameIndex operands from +/// the program. +/// +/// @brief Abstract Stack Frame Information +class MachineFrameInfo { + + // StackObject - Represent a single object allocated on the stack. + struct StackObject { + // The size of this object on the stack. 0 means a variable sized object, + // ~0ULL means a dead object. + uint64_t Size; + + // Alignment - The required alignment of this stack slot. + unsigned Alignment; + + // isImmutable - If true, the value of the stack object is set before + // entering the function and is not modified inside the function. By + // default, fixed objects are immutable unless marked otherwise. + bool isImmutable; + + // SPOffset - The offset of this object from the stack pointer on entry to + // the function. This field has no meaning for a variable sized element. 
+ int64_t SPOffset; + + StackObject(uint64_t Sz, unsigned Al, int64_t SP = 0, bool IM = false) + : Size(Sz), Alignment(Al), isImmutable(IM), SPOffset(SP) {} + }; + + /// Objects - The list of stack objects allocated... + /// + std::vector<StackObject> Objects; + + /// NumFixedObjects - This contains the number of fixed objects contained on + /// the stack. Because fixed objects are stored at a negative index in the + /// Objects list, this is also the index to the 0th object in the list. + /// + unsigned NumFixedObjects; + + /// HasVarSizedObjects - This boolean keeps track of whether any variable + /// sized objects have been allocated yet. + /// + bool HasVarSizedObjects; + + /// FrameAddressTaken - This boolean keeps track of whether there is a call + /// to builtin \@llvm.frameaddress. + bool FrameAddressTaken; + + /// StackSize - The prolog/epilog code inserter calculates the final stack + /// offsets for all of the fixed size objects, updating the Objects list + /// above. It then updates StackSize to contain the number of bytes that need + /// to be allocated on entry to the function. + /// + uint64_t StackSize; + + /// OffsetAdjustment - The amount that a frame offset needs to be adjusted to + /// have the actual offset from the stack/frame pointer. The calculation is + /// MFI->getObjectOffset(Index) + StackSize - TFI.getOffsetOfLocalArea() + + /// OffsetAdjustment. If OffsetAdjustment is zero (default) then offsets are + /// away from TOS. If OffsetAdjustment == StackSize then offsets are toward + /// TOS. + int OffsetAdjustment; + + /// MaxAlignment - The prolog/epilog code inserter may process objects + /// that require greater alignment than the default alignment the target + /// provides. To handle this, MaxAlignment is set to the maximum alignment + /// needed by the objects on the current frame. If this is greater than the + /// native alignment maintained by the compiler, dynamic alignment code will + /// be needed. + /// + unsigned MaxAlignment; + + /// HasCalls - Set to true if this function has any function calls. This is + /// only valid during and after prolog/epilog code insertion. + bool HasCalls; + + /// StackProtectorIdx - The frame index for the stack protector. + int StackProtectorIdx; + + /// MaxCallFrameSize - This contains the size of the largest call frame if the + /// target uses frame setup/destroy pseudo instructions (as defined in the + /// TargetFrameInfo class). This information is important for frame pointer + /// elimination. If is only valid during and after prolog/epilog code + /// insertion. + /// + unsigned MaxCallFrameSize; + + /// CSInfo - The prolog/epilog code inserter fills in this vector with each + /// callee saved register saved in the frame. Beyond its use by the prolog/ + /// epilog code inserter, this data used for debug info and exception + /// handling. + std::vector<CalleeSavedInfo> CSInfo; + + /// MMI - This field is set (via setMachineModuleInfo) by a module info + /// consumer (ex. DwarfWriter) to indicate that frame layout information + /// should be acquired. Typically, it's the responsibility of the target's + /// TargetRegisterInfo prologue/epilogue emitting code to inform + /// MachineModuleInfo of frame layouts. + MachineModuleInfo *MMI; + + /// TargetFrameInfo - Target information about frame layout. 
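  /// Illustrative sketch (hypothetical snippet; MFI is a MachineFrameInfo*
  /// and FI a frame index): after prolog/epilog insertion a target can turn a
  /// frame index into a real stack-pointer-relative offset using the
  /// OffsetAdjustment formula documented above:
  ///
  ///   int64_t Off = MFI->getObjectOffset(FI) + MFI->getStackSize() -
  ///                 TFI.getOffsetOfLocalArea() + MFI->getOffsetAdjustment();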
+ /// + const TargetFrameInfo &TFI; +public: + explicit MachineFrameInfo(const TargetFrameInfo &tfi) : TFI(tfi) { + StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0; + HasVarSizedObjects = false; + FrameAddressTaken = false; + HasCalls = false; + StackProtectorIdx = -1; + MaxCallFrameSize = 0; + MMI = 0; + } + + /// hasStackObjects - Return true if there are any stack objects in this + /// function. + /// + bool hasStackObjects() const { return !Objects.empty(); } + + /// hasVarSizedObjects - This method may be called any time after instruction + /// selection is complete to determine if the stack frame for this function + /// contains any variable sized objects. + /// + bool hasVarSizedObjects() const { return HasVarSizedObjects; } + + /// getStackProtectorIndex/setStackProtectorIndex - Return the index for the + /// stack protector object. + /// + int getStackProtectorIndex() const { return StackProtectorIdx; } + void setStackProtectorIndex(int I) { StackProtectorIdx = I; } + + /// isFrameAddressTaken - This method may be called any time after instruction + /// selection is complete to determine if there is a call to + /// \@llvm.frameaddress in this function. + bool isFrameAddressTaken() const { return FrameAddressTaken; } + void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; } + + /// getObjectIndexBegin - Return the minimum frame object index. + /// + int getObjectIndexBegin() const { return -NumFixedObjects; } + + /// getObjectIndexEnd - Return one past the maximum frame object index. + /// + int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; } + + /// getNumFixedObjects() - Return the number of fixed objects. + unsigned getNumFixedObjects() const { return NumFixedObjects; } + + /// getNumObjects() - Return the number of objects. + /// + unsigned getNumObjects() const { return Objects.size(); } + + /// getObjectSize - Return the size of the specified object. + /// + int64_t getObjectSize(int ObjectIdx) const { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx+NumFixedObjects].Size; + } + + /// setObjectSize - Change the size of the specified stack object. + void setObjectSize(int ObjectIdx, int64_t Size) { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + Objects[ObjectIdx+NumFixedObjects].Size = Size; + } + + /// getObjectAlignment - Return the alignment of the specified stack object. + unsigned getObjectAlignment(int ObjectIdx) const { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx+NumFixedObjects].Alignment; + } + + /// setObjectAlignment - Change the alignment of the specified stack object. + void setObjectAlignment(int ObjectIdx, unsigned Align) { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + Objects[ObjectIdx+NumFixedObjects].Alignment = Align; + } + + /// getObjectOffset - Return the assigned stack offset of the specified object + /// from the incoming stack pointer. + /// + int64_t getObjectOffset(int ObjectIdx) const { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + assert(!isDeadObjectIndex(ObjectIdx) && + "Getting frame offset for a dead object?"); + return Objects[ObjectIdx+NumFixedObjects].SPOffset; + } + + /// setObjectOffset - Set the stack frame offset of the specified object. 
The + /// offset is relative to the stack pointer on entry to the function. + /// + void setObjectOffset(int ObjectIdx, int64_t SPOffset) { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + assert(!isDeadObjectIndex(ObjectIdx) && + "Setting frame offset for a dead object?"); + Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset; + } + + /// getStackSize - Return the number of bytes that must be allocated to hold + /// all of the fixed size frame objects. This is only valid after + /// Prolog/Epilog code insertion has finalized the stack frame layout. + /// + uint64_t getStackSize() const { return StackSize; } + + /// setStackSize - Set the size of the stack... + /// + void setStackSize(uint64_t Size) { StackSize = Size; } + + /// getOffsetAdjustment - Return the correction for frame offsets. + /// + int getOffsetAdjustment() const { return OffsetAdjustment; } + + /// setOffsetAdjustment - Set the correction for frame offsets. + /// + void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; } + + /// getMaxAlignment - Return the alignment in bytes that this function must be + /// aligned to, which is greater than the default stack alignment provided by + /// the target. + /// + unsigned getMaxAlignment() const { return MaxAlignment; } + + /// setMaxAlignment - Set the preferred alignment. + /// + void setMaxAlignment(unsigned Align) { MaxAlignment = Align; } + + /// hasCalls - Return true if the current function has no function calls. + /// This is only valid during or after prolog/epilog code emission. + /// + bool hasCalls() const { return HasCalls; } + void setHasCalls(bool V) { HasCalls = V; } + + /// getMaxCallFrameSize - Return the maximum size of a call frame that must be + /// allocated for an outgoing function call. This is only available if + /// CallFrameSetup/Destroy pseudo instructions are used by the target, and + /// then only during or after prolog/epilog code insertion. + /// + unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; } + void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; } + + /// CreateFixedObject - Create a new object at a fixed location on the stack. + /// All fixed objects should be created before other objects are created for + /// efficiency. By default, fixed objects are immutable. This returns an + /// index with a negative value. + /// + int CreateFixedObject(uint64_t Size, int64_t SPOffset, + bool Immutable = true); + + + /// isFixedObjectIndex - Returns true if the specified index corresponds to a + /// fixed stack object. + bool isFixedObjectIndex(int ObjectIdx) const { + return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects); + } + + /// isImmutableObjectIndex - Returns true if the specified index corresponds + /// to an immutable object. + bool isImmutableObjectIndex(int ObjectIdx) const { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx+NumFixedObjects].isImmutable; + } + + /// isDeadObjectIndex - Returns true if the specified index corresponds to + /// a dead object. + bool isDeadObjectIndex(int ObjectIdx) const { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL; + } + + /// CreateStackObject - Create a new statically sized stack object, returning + /// a nonnegative identifier to represent it. 
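  /// Illustrative sketch (hypothetical snippet, assuming RC is a
  /// TargetRegisterClass with getSize()/getAlignment()): a spill slot is
  /// typically created as
  ///
  ///   int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
  ///                                                 RC->getAlignment());
  ///
  /// and then referenced through an MO_FrameIndex operand.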
+ /// + int CreateStackObject(uint64_t Size, unsigned Alignment) { + assert(Size != 0 && "Cannot allocate zero size stack objects!"); + Objects.push_back(StackObject(Size, Alignment)); + return (int)Objects.size()-NumFixedObjects-1; + } + + /// RemoveStackObject - Remove or mark dead a statically sized stack object. + /// + void RemoveStackObject(int ObjectIdx) { + // Mark it dead. + Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL; + } + + /// CreateVariableSizedObject - Notify the MachineFrameInfo object that a + /// variable sized object has been created. This must be created whenever a + /// variable sized object is created, whether or not the index returned is + /// actually used. + /// + int CreateVariableSizedObject() { + HasVarSizedObjects = true; + Objects.push_back(StackObject(0, 1)); + return (int)Objects.size()-NumFixedObjects-1; + } + + /// getCalleeSavedInfo - Returns a reference to call saved info vector for the + /// current function. + const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const { + return CSInfo; + } + + /// setCalleeSavedInfo - Used by prolog/epilog inserter to set the function's + /// callee saved information. + void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) { + CSInfo = CSI; + } + + /// getMachineModuleInfo - Used by a prologue/epilogue + /// emitter (TargetRegisterInfo) to provide frame layout information. + MachineModuleInfo *getMachineModuleInfo() const { return MMI; } + + /// setMachineModuleInfo - Used by a meta info consumer (DwarfWriter) to + /// indicate that frame layout information should be gathered. + void setMachineModuleInfo(MachineModuleInfo *mmi) { MMI = mmi; } + + /// print - Used by the MachineFunction printer to print information about + /// stack objects. Implemented in MachineFunction.cpp + /// + void print(const MachineFunction &MF, std::ostream &OS) const; + + /// dump - Call print(MF, std::cerr) to be called from the debugger. + void dump(const MachineFunction &MF) const; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h new file mode 100644 index 0000000..a110e58 --- /dev/null +++ b/include/llvm/CodeGen/MachineFunction.h @@ -0,0 +1,407 @@ +//===-- llvm/CodeGen/MachineFunction.h --------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Collect native machine code for a function. This class contains a list of +// MachineBasicBlock instances that make up the current compiled function. +// +// This class also contains pointers to various classes which hold +// target-specific information about the generated code. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H +#define LLVM_CODEGEN_MACHINEFUNCTION_H + +#include "llvm/ADT/ilist.h" +#include "llvm/CodeGen/DebugLoc.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/Support/Annotation.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/Recycler.h" + +namespace llvm { + +class Function; +class MachineRegisterInfo; +class MachineFrameInfo; +class MachineConstantPool; +class MachineJumpTableInfo; +class TargetMachine; +class TargetRegisterClass; + +template <> +struct ilist_traits<MachineBasicBlock> + : public ilist_default_traits<MachineBasicBlock> { + mutable ilist_node<MachineBasicBlock> Sentinel; +public: + MachineBasicBlock *createSentinel() const { + return static_cast<MachineBasicBlock*>(&Sentinel); + } + void destroySentinel(MachineBasicBlock *) const {} + + MachineBasicBlock *provideInitialHead() const { return createSentinel(); } + MachineBasicBlock *ensureHead(MachineBasicBlock*) const { + return createSentinel(); + } + static void noteHead(MachineBasicBlock*, MachineBasicBlock*) {} + + void addNodeToList(MachineBasicBlock* MBB); + void removeNodeFromList(MachineBasicBlock* MBB); + void deleteNode(MachineBasicBlock *MBB); +private: + void createNode(const MachineBasicBlock &); +}; + +/// MachineFunctionInfo - This class can be derived from and used by targets to +/// hold private target-specific information for each MachineFunction. Objects +/// of type are accessed/created with MF::getInfo and destroyed when the +/// MachineFunction is destroyed. +struct MachineFunctionInfo { + virtual ~MachineFunctionInfo() {} +}; + +class MachineFunction : private Annotation { + const Function *Fn; + const TargetMachine &Target; + + // RegInfo - Information about each register in use in the function. + MachineRegisterInfo *RegInfo; + + // Used to keep track of target-specific per-machine function information for + // the target implementation. + MachineFunctionInfo *MFInfo; + + // Keep track of objects allocated on the stack. + MachineFrameInfo *FrameInfo; + + // Keep track of constants which are spilled to memory + MachineConstantPool *ConstantPool; + + // Keep track of jump tables for switch instructions + MachineJumpTableInfo *JumpTableInfo; + + // Function-level unique numbering for MachineBasicBlocks. When a + // MachineBasicBlock is inserted into a MachineFunction is it automatically + // numbered and this vector keeps track of the mapping from ID's to MBB's. + std::vector<MachineBasicBlock*> MBBNumbering; + + // Pool-allocate MachineFunction-lifetime and IR objects. + BumpPtrAllocator Allocator; + + // Allocation management for instructions in function. + Recycler<MachineInstr> InstructionRecycler; + + // Allocation management for basic blocks in function. + Recycler<MachineBasicBlock> BasicBlockRecycler; + + // List of machine basic blocks in function + typedef ilist<MachineBasicBlock> BasicBlockListType; + BasicBlockListType BasicBlocks; + + // Default debug location. Used to print out the debug label at the beginning + // of a function. + DebugLoc DefaultDebugLoc; + + // Tracks debug locations. 
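  // Illustrative sketch (hypothetical snippet; CompileUnit, Line and Col are
  // assumed to come from the source-level debug info): instruction selection
  // records a location roughly as
  //   unsigned Id = MF.getOrCreateDebugLocID(CompileUnit, Line, Col);
  //   MI->setDebugLoc(DebugLoc::get(Id));
  // and DebugLocInfo keeps the Id -> DebugLocTuple mapping.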
+ DebugLocTracker DebugLocInfo; + +public: + MachineFunction(const Function *Fn, const TargetMachine &TM); + ~MachineFunction(); + + /// getFunction - Return the LLVM function that this machine code represents + /// + const Function *getFunction() const { return Fn; } + + /// getTarget - Return the target machine this machine code is compiled with + /// + const TargetMachine &getTarget() const { return Target; } + + /// getRegInfo - Return information about the registers currently in use. + /// + MachineRegisterInfo &getRegInfo() { return *RegInfo; } + const MachineRegisterInfo &getRegInfo() const { return *RegInfo; } + + /// getFrameInfo - Return the frame info object for the current function. + /// This object contains information about objects allocated on the stack + /// frame of the current function in an abstract way. + /// + MachineFrameInfo *getFrameInfo() { return FrameInfo; } + const MachineFrameInfo *getFrameInfo() const { return FrameInfo; } + + /// getJumpTableInfo - Return the jump table info object for the current + /// function. This object contains information about jump tables for switch + /// instructions in the current function. + /// + MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; } + const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; } + + /// getConstantPool - Return the constant pool object for the current + /// function. + /// + MachineConstantPool *getConstantPool() { return ConstantPool; } + const MachineConstantPool *getConstantPool() const { return ConstantPool; } + + /// MachineFunctionInfo - Keep track of various per-function pieces of + /// information for backends that would like to do so. + /// + template<typename Ty> + Ty *getInfo() { + if (!MFInfo) { + // This should be just `new (Allocator.Allocate<Ty>()) Ty(*this)', but + // that apparently breaks GCC 3.3. + Ty *Loc = static_cast<Ty*>(Allocator.Allocate(sizeof(Ty), + AlignOf<Ty>::Alignment)); + MFInfo = new (Loc) Ty(*this); + } + + assert((void*)dynamic_cast<Ty*>(MFInfo) == (void*)MFInfo && + "Invalid concrete type or multiple inheritence for getInfo"); + return static_cast<Ty*>(MFInfo); + } + + template<typename Ty> + const Ty *getInfo() const { + return const_cast<MachineFunction*>(this)->getInfo<Ty>(); + } + + /// getBlockNumbered - MachineBasicBlocks are automatically numbered when they + /// are inserted into the machine function. The block number for a machine + /// basic block can be found by using the MBB::getBlockNumber method, this + /// method provides the inverse mapping. + /// + MachineBasicBlock *getBlockNumbered(unsigned N) const { + assert(N < MBBNumbering.size() && "Illegal block number"); + assert(MBBNumbering[N] && "Block was removed from the machine function!"); + return MBBNumbering[N]; + } + + /// getNumBlockIDs - Return the number of MBB ID's allocated. + /// + unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); } + + /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and + /// recomputes them. This guarantees that the MBB numbers are sequential, + /// dense, and match the ordering of the blocks within the function. If a + /// specific MachineBasicBlock is specified, only that block and those after + /// it are renumbered. + void RenumberBlocks(MachineBasicBlock *MBBFrom = 0); + + /// print - Print out the MachineFunction in a format suitable for debugging + /// to the specified stream. 
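  /// Illustrative sketch (hypothetical debugging snippet): the whole function,
  /// or its blocks one at a time, can be written to a stream, e.g.
  ///
  ///   MF.print(std::cerr);                         // entire function
  ///   for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
  ///        I != E; ++I)
  ///     I->print(std::cerr);                       // one block at a time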
+ /// + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + + /// viewCFG - This function is meant for use from the debugger. You can just + /// say 'call F->viewCFG()' and a ghostview window should pop up from the + /// program, displaying the CFG of the current function with the code for each + /// basic block inside. This depends on there being a 'dot' and 'gv' program + /// in your path. + /// + void viewCFG() const; + + /// viewCFGOnly - This function is meant for use from the debugger. It works + /// just like viewCFG, but it does not include the contents of basic blocks + /// into the nodes, just the label. If you are only interested in the CFG + /// this can make the graph smaller. + /// + void viewCFGOnly() const; + + /// dump - Print the current MachineFunction to cerr, useful for debugger use. + /// + void dump() const; + + /// construct - Allocate and initialize a MachineFunction for a given Function + /// and Target + /// + static MachineFunction& construct(const Function *F, const TargetMachine &TM); + + /// destruct - Destroy the MachineFunction corresponding to a given Function + /// + static void destruct(const Function *F); + + /// get - Return a handle to a MachineFunction corresponding to the given + /// Function. This should not be called before "construct()" for a given + /// Function. + /// + static MachineFunction& get(const Function *F); + + // Provide accessors for the MachineBasicBlock list... + typedef BasicBlockListType::iterator iterator; + typedef BasicBlockListType::const_iterator const_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + + /// addLiveIn - Add the specified physical register as a live-in value and + /// create a corresponding virtual register for it. + unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC); + + //===--------------------------------------------------------------------===// + // BasicBlock accessor functions. 
+ // + iterator begin() { return BasicBlocks.begin(); } + const_iterator begin() const { return BasicBlocks.begin(); } + iterator end () { return BasicBlocks.end(); } + const_iterator end () const { return BasicBlocks.end(); } + + reverse_iterator rbegin() { return BasicBlocks.rbegin(); } + const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); } + reverse_iterator rend () { return BasicBlocks.rend(); } + const_reverse_iterator rend () const { return BasicBlocks.rend(); } + + unsigned size() const { return (unsigned)BasicBlocks.size();} + bool empty() const { return BasicBlocks.empty(); } + const MachineBasicBlock &front() const { return BasicBlocks.front(); } + MachineBasicBlock &front() { return BasicBlocks.front(); } + const MachineBasicBlock & back() const { return BasicBlocks.back(); } + MachineBasicBlock & back() { return BasicBlocks.back(); } + + void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); } + void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); } + void insert(iterator MBBI, MachineBasicBlock *MBB) { + BasicBlocks.insert(MBBI, MBB); + } + void splice(iterator InsertPt, iterator MBBI) { + BasicBlocks.splice(InsertPt, BasicBlocks, MBBI); + } + + void remove(iterator MBBI) { + BasicBlocks.remove(MBBI); + } + void erase(iterator MBBI) { + BasicBlocks.erase(MBBI); + } + + //===--------------------------------------------------------------------===// + // Internal functions used to automatically number MachineBasicBlocks + // + + /// getNextMBBNumber - Returns the next unique number to be assigned + /// to a MachineBasicBlock in this MachineFunction. + /// + unsigned addToMBBNumbering(MachineBasicBlock *MBB) { + MBBNumbering.push_back(MBB); + return (unsigned)MBBNumbering.size()-1; + } + + /// removeFromMBBNumbering - Remove the specific machine basic block from our + /// tracker, this is only really to be used by the MachineBasicBlock + /// implementation. + void removeFromMBBNumbering(unsigned N) { + assert(N < MBBNumbering.size() && "Illegal basic block #"); + MBBNumbering[N] = 0; + } + + /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead + /// of `new MachineInstr'. + /// + MachineInstr *CreateMachineInstr(const TargetInstrDesc &TID, + DebugLoc DL, + bool NoImp = false); + + /// CloneMachineInstr - Create a new MachineInstr which is a copy of the + /// 'Orig' instruction, identical in all ways except the the instruction + /// has no parent, prev, or next. + /// + MachineInstr *CloneMachineInstr(const MachineInstr *Orig); + + /// DeleteMachineInstr - Delete the given MachineInstr. + /// + void DeleteMachineInstr(MachineInstr *MI); + + /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this + /// instead of `new MachineBasicBlock'. + /// + MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = 0); + + /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock. + /// + void DeleteMachineBasicBlock(MachineBasicBlock *MBB); + + //===--------------------------------------------------------------------===// + // Debug location. + // + + /// getOrCreateDebugLocID - Look up the DebugLocTuple index with the given + /// source file, line, and column. If none currently exists, create a new + /// DebugLocTuple, and insert it into the DebugIdMap. + unsigned getOrCreateDebugLocID(GlobalVariable *CompileUnit, + unsigned Line, unsigned Col); + + /// getDebugLocTuple - Get the DebugLocTuple for a given DebugLoc object. 
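  /// Illustrative sketch (hypothetical snippet, assuming DebugLocTuple exposes
  /// Line and Col fields): decoding an instruction's source position:
  ///
  ///   DebugLoc DL = MI->getDebugLoc();
  ///   if (!DL.isUnknown()) {
  ///     DebugLocTuple DLT = MF.getDebugLocTuple(DL);
  ///     unsigned Line = DLT.Line, Col = DLT.Col;
  ///   }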
+ DebugLocTuple getDebugLocTuple(DebugLoc DL) const; + + /// getDefaultDebugLoc - Get the default debug location for the machine + /// function. + DebugLoc getDefaultDebugLoc() const { return DefaultDebugLoc; } + + /// setDefaultDebugLoc - Get the default debug location for the machine + /// function. + void setDefaultDebugLoc(DebugLoc DL) { DefaultDebugLoc = DL; } +}; + +//===--------------------------------------------------------------------===// +// GraphTraits specializations for function basic block graphs (CFGs) +//===--------------------------------------------------------------------===// + +// Provide specializations of GraphTraits to be able to treat a +// machine function as a graph of machine basic blocks... these are +// the same as the machine basic block iterators, except that the root +// node is implicitly the first node of the function. +// +template <> struct GraphTraits<MachineFunction*> : + public GraphTraits<MachineBasicBlock*> { + static NodeType *getEntryNode(MachineFunction *F) { + return &F->front(); + } + + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + typedef MachineFunction::iterator nodes_iterator; + static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); } + static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); } +}; +template <> struct GraphTraits<const MachineFunction*> : + public GraphTraits<const MachineBasicBlock*> { + static NodeType *getEntryNode(const MachineFunction *F) { + return &F->front(); + } + + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + typedef MachineFunction::const_iterator nodes_iterator; + static nodes_iterator nodes_begin(const MachineFunction *F) { + return F->begin(); + } + static nodes_iterator nodes_end (const MachineFunction *F) { + return F->end(); + } +}; + + +// Provide specializations of GraphTraits to be able to treat a function as a +// graph of basic blocks... and to walk it in inverse order. Inverse order for +// a function is considered to be when traversing the predecessor edges of a BB +// instead of the successor edges. +// +template <> struct GraphTraits<Inverse<MachineFunction*> > : + public GraphTraits<Inverse<MachineBasicBlock*> > { + static NodeType *getEntryNode(Inverse<MachineFunction*> G) { + return &G.Graph->front(); + } +}; +template <> struct GraphTraits<Inverse<const MachineFunction*> > : + public GraphTraits<Inverse<const MachineBasicBlock*> > { + static NodeType *getEntryNode(Inverse<const MachineFunction *> G) { + return &G.Graph->front(); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineFunctionPass.h b/include/llvm/CodeGen/MachineFunctionPass.h new file mode 100644 index 0000000..6b5e64a --- /dev/null +++ b/include/llvm/CodeGen/MachineFunctionPass.h @@ -0,0 +1,45 @@ +//===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MachineFunctionPass class. MachineFunctionPass's are +// just FunctionPass's, except they operate on machine code as part of a code +// generator. Because they operate on machine code, not the LLVM +// representation, MachineFunctionPass's are not allowed to modify the LLVM +// representation. 
Due to this limitation, the MachineFunctionPass class takes +// care of declaring that no LLVM passes are invalidated. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINE_FUNCTION_PASS_H +#define LLVM_CODEGEN_MACHINE_FUNCTION_PASS_H + +#include "llvm/Pass.h" +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + + // FIXME: This pass should declare that the pass does not invalidate any LLVM + // passes. +struct MachineFunctionPass : public FunctionPass { + explicit MachineFunctionPass(intptr_t ID) : FunctionPass(ID) {} + explicit MachineFunctionPass(void *ID) : FunctionPass(ID) {} + +protected: + /// runOnMachineFunction - This method must be overloaded to perform the + /// desired machine code transformation or analysis. + /// + virtual bool runOnMachineFunction(MachineFunction &MF) = 0; + +public: + bool runOnFunction(Function &F); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h new file mode 100644 index 0000000..d61e5d8 --- /dev/null +++ b/include/llvm/CodeGen/MachineInstr.h @@ -0,0 +1,375 @@ +//===-- llvm/CodeGen/MachineInstr.h - MachineInstr class --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the MachineInstr class, which is the +// basic representation for all target dependent machine instructions used by +// the back end. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEINSTR_H +#define LLVM_CODEGEN_MACHINEINSTR_H + +#include "llvm/ADT/ilist.h" +#include "llvm/ADT/ilist_node.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/Target/TargetInstrDesc.h" +#include "llvm/CodeGen/DebugLoc.h" +#include <list> +#include <vector> + +namespace llvm { + +class TargetInstrDesc; +class TargetInstrInfo; +class TargetRegisterInfo; +class MachineFunction; + +//===----------------------------------------------------------------------===// +/// MachineInstr - Representation of each machine instruction. +/// +class MachineInstr : public ilist_node<MachineInstr> { + const TargetInstrDesc *TID; // Instruction descriptor. + unsigned short NumImplicitOps; // Number of implicit operands (which + // are determined at construction time). + + std::vector<MachineOperand> Operands; // the operands + std::list<MachineMemOperand> MemOperands; // information on memory references + MachineBasicBlock *Parent; // Pointer to the owning basic block. + DebugLoc debugLoc; // Source line information. + + // OperandComplete - Return true if it's illegal to add a new operand + bool OperandsComplete() const; + + MachineInstr(const MachineInstr&); // DO NOT IMPLEMENT + void operator=(const MachineInstr&); // DO NOT IMPLEMENT + + // Intrusive list support + friend struct ilist_traits<MachineInstr>; + friend struct ilist_traits<MachineBasicBlock>; + void setParent(MachineBasicBlock *P) { Parent = P; } + + /// MachineInstr ctor - This constructor creates a copy of the given + /// MachineInstr in the given MachineFunction. 
+ MachineInstr(MachineFunction &, const MachineInstr &); + + /// MachineInstr ctor - This constructor creates a dummy MachineInstr with + /// TID NULL and no operands. + MachineInstr(); + + // The next two constructors have DebugLoc and non-DebugLoc versions; + // over time, the non-DebugLoc versions should be phased out and eventually + // removed. + + /// MachineInstr ctor - This constructor create a MachineInstr and add the + /// implicit operands. It reserves space for number of operands specified by + /// TargetInstrDesc. The version with a DebugLoc should be preferred. + explicit MachineInstr(const TargetInstrDesc &TID, bool NoImp = false); + + /// MachineInstr ctor - Work exactly the same as the ctor above, except that + /// the MachineInstr is created and added to the end of the specified basic + /// block. The version with a DebugLoc should be preferred. + /// + MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &TID); + + /// MachineInstr ctor - This constructor create a MachineInstr and add the + /// implicit operands. It reserves space for number of operands specified by + /// TargetInstrDesc. An explicit DebugLoc is supplied. + explicit MachineInstr(const TargetInstrDesc &TID, const DebugLoc dl, + bool NoImp = false); + + /// MachineInstr ctor - Work exactly the same as the ctor above, except that + /// the MachineInstr is created and added to the end of the specified basic + /// block. + /// + MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl, + const TargetInstrDesc &TID); + + ~MachineInstr(); + + // MachineInstrs are pool-allocated and owned by MachineFunction. + friend class MachineFunction; + +public: + const MachineBasicBlock* getParent() const { return Parent; } + MachineBasicBlock* getParent() { return Parent; } + + /// getDebugLoc - Returns the debug location id of this MachineInstr. + /// + const DebugLoc getDebugLoc() const { return debugLoc; } + + /// getDesc - Returns the target instruction descriptor of this + /// MachineInstr. + const TargetInstrDesc &getDesc() const { return *TID; } + + /// getOpcode - Returns the opcode of this MachineInstr. + /// + int getOpcode() const { return TID->Opcode; } + + /// Access to explicit operands of the instruction. + /// + unsigned getNumOperands() const { return (unsigned)Operands.size(); } + + const MachineOperand& getOperand(unsigned i) const { + assert(i < getNumOperands() && "getOperand() out of range!"); + return Operands[i]; + } + MachineOperand& getOperand(unsigned i) { + assert(i < getNumOperands() && "getOperand() out of range!"); + return Operands[i]; + } + + /// getNumExplicitOperands - Returns the number of non-implicit operands. + /// + unsigned getNumExplicitOperands() const; + + /// Access to memory operands of the instruction + std::list<MachineMemOperand>::iterator memoperands_begin() + { return MemOperands.begin(); } + std::list<MachineMemOperand>::iterator memoperands_end() + { return MemOperands.end(); } + std::list<MachineMemOperand>::const_iterator memoperands_begin() const + { return MemOperands.begin(); } + std::list<MachineMemOperand>::const_iterator memoperands_end() const + { return MemOperands.end(); } + bool memoperands_empty() const { return MemOperands.empty(); } + + /// hasOneMemOperand - Return true if this instruction has exactly one + /// MachineMemOperand. 
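  /// Illustrative sketch (hypothetical snippet, assuming MachineMemOperand
  /// provides isVolatile()): a pass that only trusts memory instructions with
  /// a single, non-volatile memory reference might check
  ///
  ///   if (MI->hasOneMemOperand() &&
  ///       !MI->memoperands_begin()->isVolatile()) {
  ///     // safe to reason about this single MachineMemOperand
  ///   }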
+ bool hasOneMemOperand() const { + return !memoperands_empty() && + next(memoperands_begin()) == memoperands_end(); + } + + /// isIdenticalTo - Return true if this instruction is identical to (same + /// opcode and same operands as) the specified instruction. + bool isIdenticalTo(const MachineInstr *Other) const { + if (Other->getOpcode() != getOpcode() || + Other->getNumOperands() != getNumOperands()) + return false; + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (!getOperand(i).isIdenticalTo(Other->getOperand(i))) + return false; + return true; + } + + /// removeFromParent - This method unlinks 'this' from the containing basic + /// block, and returns it, but does not delete it. + MachineInstr *removeFromParent(); + + /// eraseFromParent - This method unlinks 'this' from the containing basic + /// block and deletes it. + void eraseFromParent(); + + /// isLabel - Returns true if the MachineInstr represents a label. + /// + bool isLabel() const; + + /// isDebugLabel - Returns true if the MachineInstr represents a debug label. + /// + bool isDebugLabel() const; + + /// readsRegister - Return true if the MachineInstr reads the specified + /// register. If TargetRegisterInfo is passed, then it also checks if there + /// is a read of a super-register. + bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const { + return findRegisterUseOperandIdx(Reg, false, TRI) != -1; + } + + /// killsRegister - Return true if the MachineInstr kills the specified + /// register. If TargetRegisterInfo is passed, then it also checks if there is + /// a kill of a super-register. + bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const { + return findRegisterUseOperandIdx(Reg, true, TRI) != -1; + } + + /// modifiesRegister - Return true if the MachineInstr modifies the + /// specified register. If TargetRegisterInfo is passed, then it also checks + /// if there is a def of a super-register. + bool modifiesRegister(unsigned Reg, + const TargetRegisterInfo *TRI = NULL) const { + return findRegisterDefOperandIdx(Reg, false, TRI) != -1; + } + + /// registerDefIsDead - Returns true if the register is dead in this machine + /// instruction. If TargetRegisterInfo is passed, then it also checks + /// if there is a dead def of a super-register. + bool registerDefIsDead(unsigned Reg, + const TargetRegisterInfo *TRI = NULL) const { + return findRegisterDefOperandIdx(Reg, true, TRI) != -1; + } + + /// findRegisterUseOperandIdx() - Returns the operand index that is a use of + /// the specific register or -1 if it is not found. It further tightening + /// the search criteria to a use that kills the register if isKill is true. + int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false, + const TargetRegisterInfo *TRI = NULL) const; + + /// findRegisterUseOperand - Wrapper for findRegisterUseOperandIdx, it returns + /// a pointer to the MachineOperand rather than an index. + MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false, + const TargetRegisterInfo *TRI = NULL) { + int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI); + return (Idx == -1) ? NULL : &getOperand(Idx); + } + + /// findRegisterDefOperandIdx() - Returns the operand index that is a def of + /// the specified register or -1 if it is not found. If isDead is true, defs + /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it + /// also checks if there is a def of a super-register. 
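  /// Illustrative sketch (hypothetical snippet; Reg and TRI come from the
  /// surrounding code): marking a def of Reg dead once liveness shows the
  /// value is never read:
  ///
  ///   int Idx = MI->findRegisterDefOperandIdx(Reg, false, TRI);
  ///   if (Idx != -1)
  ///     MI->getOperand(Idx).setIsDead();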
+ int findRegisterDefOperandIdx(unsigned Reg, bool isDead = false, + const TargetRegisterInfo *TRI = NULL) const; + + /// findRegisterDefOperand - Wrapper for findRegisterDefOperandIdx, it returns + /// a pointer to the MachineOperand rather than an index. + MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false, + const TargetRegisterInfo *TRI = NULL) { + int Idx = findRegisterDefOperandIdx(Reg, isDead, TRI); + return (Idx == -1) ? NULL : &getOperand(Idx); + } + + /// findFirstPredOperandIdx() - Find the index of the first operand in the + /// operand list that is used to represent the predicate. It returns -1 if + /// none is found. + int findFirstPredOperandIdx() const; + + /// isRegTiedToUseOperand - Given the index of a register def operand, + /// check if the register def is tied to a source operand, due to either + /// two-address elimination or inline assembly constraints. Returns the + /// first tied use operand index by reference is UseOpIdx is not null. + bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const; + + /// isRegTiedToDefOperand - Return true if the use operand of the specified + /// index is tied to an def operand. It also returns the def operand index by + /// reference if DefOpIdx is not null. + bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const; + + /// copyKillDeadInfo - Copies kill / dead operand properties from MI. + /// + void copyKillDeadInfo(const MachineInstr *MI); + + /// copyPredicates - Copies predicate operand(s) from MI. + void copyPredicates(const MachineInstr *MI); + + /// addRegisterKilled - We have determined MI kills a register. Look for the + /// operand that uses it and mark it as IsKill. If AddIfNotFound is true, + /// add a implicit operand if it's not found. Returns true if the operand + /// exists / is added. + bool addRegisterKilled(unsigned IncomingReg, + const TargetRegisterInfo *RegInfo, + bool AddIfNotFound = false); + + /// addRegisterDead - We have determined MI defined a register without a use. + /// Look for the operand that defines it and mark it as IsDead. If + /// AddIfNotFound is true, add a implicit operand if it's not found. Returns + /// true if the operand exists / is added. + bool addRegisterDead(unsigned IncomingReg, const TargetRegisterInfo *RegInfo, + bool AddIfNotFound = false); + + /// isSafeToMove - Return true if it is safe to move this instruction. If + /// SawStore is set to true, it means that there is a store (or call) between + /// the instruction's location and its intended destination. + bool isSafeToMove(const TargetInstrInfo *TII, bool &SawStore) const; + + /// isSafeToReMat - Return true if it's safe to rematerialize the specified + /// instruction which defined the specified register instead of copying it. + bool isSafeToReMat(const TargetInstrInfo *TII, unsigned DstReg) const; + + /// hasVolatileMemoryRef - Return true if this instruction may have a + /// volatile memory reference, or if the information describing the + /// memory reference is not available. Return false if it is known to + /// have no volatile memory references. 
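  /// Illustrative sketch (hypothetical snippet): code motion passes typically
  /// refuse to move such instructions, e.g.
  ///
  ///   if (MI->hasVolatileMemoryRef())
  ///     return false;   // treat the instruction as immovable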
+ bool hasVolatileMemoryRef() const; + + // + // Debugging support + // + void print(std::ostream *OS, const TargetMachine *TM) const { + if (OS) print(*OS, TM); + } + void print(std::ostream &OS, const TargetMachine *TM = 0) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + void print(raw_ostream *OS, const TargetMachine *TM) const { + if (OS) print(*OS, TM); + } + void print(raw_ostream &OS, const TargetMachine *TM = 0) const; + void print(raw_ostream *OS) const { if (OS) print(*OS); } + void dump() const; + + //===--------------------------------------------------------------------===// + // Accessors used to build up machine instructions. + + /// addOperand - Add the specified operand to the instruction. If it is an + /// implicit operand, it is added to the end of the operand list. If it is + /// an explicit operand it is added at the end of the explicit operand list + /// (before the first implicit operand). + void addOperand(const MachineOperand &Op); + + /// setDesc - Replace the instruction descriptor (thus opcode) of + /// the current instruction with a new one. + /// + void setDesc(const TargetInstrDesc &tid) { TID = &tid; } + + /// setDebugLoc - Replace current source information with new such. + /// Avoid using this, the constructor argument is preferable. + /// + void setDebugLoc(const DebugLoc dl) { debugLoc = dl; } + + /// RemoveOperand - Erase an operand from an instruction, leaving it with one + /// fewer operand than it started with. + /// + void RemoveOperand(unsigned i); + + /// addMemOperand - Add a MachineMemOperand to the machine instruction, + /// referencing arbitrary storage. + void addMemOperand(MachineFunction &MF, + const MachineMemOperand &MO); + + /// clearMemOperands - Erase all of this MachineInstr's MachineMemOperands. + void clearMemOperands(MachineFunction &MF); + +private: + /// getRegInfo - If this instruction is embedded into a MachineFunction, + /// return the MachineRegisterInfo object for the current function, otherwise + /// return null. + MachineRegisterInfo *getRegInfo(); + + /// addImplicitDefUseOperands - Add all implicit def and use operands to + /// this instruction. + void addImplicitDefUseOperands(); + + /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in + /// this instruction from their respective use lists. This requires that the + /// operands already be on their use lists. + void RemoveRegOperandsFromUseLists(); + + /// AddRegOperandsToUseLists - Add all of the register operands in + /// this instruction from their respective use lists. This requires that the + /// operands not be on their use lists yet. + void AddRegOperandsToUseLists(MachineRegisterInfo &RegInfo); +}; + +//===----------------------------------------------------------------------===// +// Debugging Support + +inline std::ostream& operator<<(std::ostream &OS, const MachineInstr &MI) { + MI.print(OS); + return OS; +} + +inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) { + MI.print(OS); + return OS; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h new file mode 100644 index 0000000..d3a0995 --- /dev/null +++ b/include/llvm/CodeGen/MachineInstrBuilder.h @@ -0,0 +1,225 @@ +//===-- CodeGen/MachineInstBuilder.h - Simplify creation of MIs -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file exposes a function named BuildMI, which is useful for dramatically +// simplifying how MachineInstr's are created. It allows use of code like this: +// +// M = BuildMI(X86::ADDrr8, 2).addReg(argVal1).addReg(argVal2); +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H +#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H + +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + +class TargetInstrDesc; + +namespace RegState { + enum { + Define = 0x2, + Implicit = 0x4, + Kill = 0x8, + Dead = 0x10, + EarlyClobber = 0x20, + ImplicitDefine = Implicit | Define, + ImplicitKill = Implicit | Kill + }; +} + +class MachineInstrBuilder { + MachineInstr *MI; +public: + explicit MachineInstrBuilder(MachineInstr *mi) : MI(mi) {} + + /// Allow automatic conversion to the machine instruction we are working on. + /// + operator MachineInstr*() const { return MI; } + operator MachineBasicBlock::iterator() const { return MI; } + + /// addReg - Add a new virtual register operand... + /// + const + MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0, + unsigned SubReg = 0) const { + assert((flags & 0x1) == 0 && + "Passing in 'true' to addReg is forbidden! Use enums instead."); + MI->addOperand(MachineOperand::CreateReg(RegNo, + flags & RegState::Define, + flags & RegState::Implicit, + flags & RegState::Kill, + flags & RegState::Dead, + SubReg, + flags & RegState::EarlyClobber)); + return *this; + } + + /// addImm - Add a new immediate operand. + /// + const MachineInstrBuilder &addImm(int64_t Val) const { + MI->addOperand(MachineOperand::CreateImm(Val)); + return *this; + } + + const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const { + MI->addOperand(MachineOperand::CreateFPImm(Val)); + return *this; + } + + const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB) const { + MI->addOperand(MachineOperand::CreateMBB(MBB)); + return *this; + } + + const MachineInstrBuilder &addFrameIndex(unsigned Idx) const { + MI->addOperand(MachineOperand::CreateFI(Idx)); + return *this; + } + + const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx, + int Offset = 0) const { + MI->addOperand(MachineOperand::CreateCPI(Idx, Offset)); + return *this; + } + + const MachineInstrBuilder &addJumpTableIndex(unsigned Idx) const { + MI->addOperand(MachineOperand::CreateJTI(Idx)); + return *this; + } + + const MachineInstrBuilder &addGlobalAddress(GlobalValue *GV, + int64_t Offset = 0) const { + MI->addOperand(MachineOperand::CreateGA(GV, Offset)); + return *this; + } + + const MachineInstrBuilder &addExternalSymbol(const char *FnName, + int64_t Offset = 0) const { + MI->addOperand(MachineOperand::CreateES(FnName, Offset)); + return *this; + } + + const MachineInstrBuilder &addMemOperand(const MachineMemOperand &MMO) const { + MI->addMemOperand(*MI->getParent()->getParent(), MMO); + return *this; + } + + const MachineInstrBuilder &addOperand(const MachineOperand &MO) const { + if (MO.isReg()) + return addReg(MO.getReg(), + (MO.isDef() ? RegState::Define : 0) | + (MO.isImplicit() ? RegState::Implicit : 0) | + (MO.isKill() ? RegState::Kill : 0) | + (MO.isDead() ? RegState::Dead : 0) | + (MO.isEarlyClobber() ? 
RegState::EarlyClobber : 0), + MO.getSubReg()); + if (MO.isImm()) + return addImm(MO.getImm()); + if (MO.isFI()) + return addFrameIndex(MO.getIndex()); + if (MO.isGlobal()) + return addGlobalAddress(MO.getGlobal(), MO.getOffset()); + if (MO.isCPI()) + return addConstantPoolIndex(MO.getIndex(), MO.getOffset()); + if (MO.isSymbol()) + return addExternalSymbol(MO.getSymbolName()); + if (MO.isJTI()) + return addJumpTableIndex(MO.getIndex()); + + assert(0 && "Unknown operand for MachineInstrBuilder::AddOperand!"); + return *this; + } +}; + +/// BuildMI - Builder interface. Specify how to create the initial instruction +/// itself. +/// +inline MachineInstrBuilder BuildMI(MachineFunction &MF, + DebugLoc DL, + const TargetInstrDesc &TID) { + return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL)); +} + +/// BuildMI - This version of the builder sets up the first operand as a +/// destination virtual register. +/// +inline MachineInstrBuilder BuildMI(MachineFunction &MF, + DebugLoc DL, + const TargetInstrDesc &TID, + unsigned DestReg) { + return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL)) + .addReg(DestReg, RegState::Define); +} + +/// BuildMI - This version of the builder inserts the newly-built +/// instruction before the given position in the given MachineBasicBlock, and +/// sets up the first operand as a destination virtual register. +/// +inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, + MachineBasicBlock::iterator I, + DebugLoc DL, + const TargetInstrDesc &TID, + unsigned DestReg) { + MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL); + BB.insert(I, MI); + return MachineInstrBuilder(MI).addReg(DestReg, RegState::Define); +} + +/// BuildMI - This version of the builder inserts the newly-built +/// instruction before the given position in the given MachineBasicBlock, and +/// does NOT take a destination register. +/// +inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, + MachineBasicBlock::iterator I, + DebugLoc DL, + const TargetInstrDesc &TID) { + MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL); + BB.insert(I, MI); + return MachineInstrBuilder(MI); +} + +/// BuildMI - This version of the builder inserts the newly-built +/// instruction at the end of the given MachineBasicBlock, and does NOT take a +/// destination register. +/// +inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, + DebugLoc DL, + const TargetInstrDesc &TID) { + return BuildMI(*BB, BB->end(), DL, TID); +} + +/// BuildMI - This version of the builder inserts the newly-built +/// instruction at the end of the given MachineBasicBlock, and sets up the first +/// operand as a destination virtual register. +/// +inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, + DebugLoc DL, + const TargetInstrDesc &TID, + unsigned DestReg) { + return BuildMI(*BB, BB->end(), DL, TID, DestReg); +} + +inline unsigned getDefRegState(bool B) { + return B ? RegState::Define : 0; +} +inline unsigned getImplRegState(bool B) { + return B ? RegState::Implicit : 0; +} +inline unsigned getKillRegState(bool B) { + return B ? RegState::Kill : 0; +} +inline unsigned getDeadRegState(bool B) { + return B ? 
RegState::Dead : 0; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineJumpTableInfo.h b/include/llvm/CodeGen/MachineJumpTableInfo.h new file mode 100644 index 0000000..56e2e54 --- /dev/null +++ b/include/llvm/CodeGen/MachineJumpTableInfo.h @@ -0,0 +1,92 @@ +//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The MachineJumpTableInfo class keeps track of jump tables referenced by +// lowered switch instructions in the MachineFunction. +// +// Instructions reference the address of these jump tables through the use of +// MO_JumpTableIndex values. When emitting assembly or machine code, these +// virtual address references are converted to refer to the address of the +// function jump tables. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H +#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H + +#include <vector> +#include <iosfwd> +#include <cassert> + +namespace llvm { + +class MachineBasicBlock; +class TargetData; + +/// MachineJumpTableEntry - One jump table in the jump table info. +/// +struct MachineJumpTableEntry { + /// MBBs - The vector of basic blocks from which to create the jump table. + std::vector<MachineBasicBlock*> MBBs; + + explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M) + : MBBs(M) {} +}; + +class MachineJumpTableInfo { + unsigned EntrySize; + unsigned Alignment; + std::vector<MachineJumpTableEntry> JumpTables; +public: + MachineJumpTableInfo(unsigned Size, unsigned Align) + : EntrySize(Size), Alignment(Align) {} + + /// getJumpTableIndex - Create a new jump table or return an existing one. + /// + unsigned getJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs); + + /// isEmpty - Return true if there are no jump tables. + /// + bool isEmpty() const { return JumpTables.empty(); } + + const std::vector<MachineJumpTableEntry> &getJumpTables() const { + return JumpTables; + } + + /// RemoveJumpTable - Mark the specific index as being dead. This will cause + /// it to not be emitted. + void RemoveJumpTable(unsigned Idx) { + JumpTables[Idx].MBBs.clear(); + } + + /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update + /// the jump tables to branch to New instead. + bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New); + + /// getEntrySize - Returns the size of an individual field in a jump table. + /// + unsigned getEntrySize() const { return EntrySize; } + + /// getAlignment - returns the target's preferred alignment for jump tables + unsigned getAlignment() const { return Alignment; } + + /// print - Used by the MachineFunction printer to print information about + /// jump tables. Implemented in MachineFunction.cpp + /// + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + + /// dump - Call print(std::cerr) to be called from the debugger. 
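A minimal usage sketch for the MachineInstrBuilder/BuildMI interface above. It assumes a TargetInstrInfo pointer TII whose get(Opcode) returns the TargetInstrDesc for a hypothetical three-address add opcode AddOpc, plus previously created registers DstReg, SrcA and SrcB; only the builder calls themselves come from this header.

#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Insert "DstReg = add SrcA, SrcB" before iterator I in block MBB.
void emitAdd(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
             const TargetInstrInfo *TII, unsigned AddOpc,
             unsigned DstReg, unsigned SrcA, unsigned SrcB, bool LastUseOfB) {
  BuildMI(MBB, I, DL, TII->get(AddOpc), DstReg)   // DstReg is added with RegState::Define
    .addReg(SrcA)                                 // plain register use
    .addReg(SrcB, getKillRegState(LastUseOfB));   // mark the use as a kill only when true
}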
+ /// + void dump() const; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineLocation.h b/include/llvm/CodeGen/MachineLocation.h new file mode 100644 index 0000000..2db4e55 --- /dev/null +++ b/include/llvm/CodeGen/MachineLocation.h @@ -0,0 +1,106 @@ +//===-- llvm/CodeGen/MachineLocation.h --------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// The MachineLocation class is used to represent a simple location in a machine +// frame. Locations will be one of two forms; a register or an address formed +// from a base address plus an offset. Register indirection can be specified by +// using an offset of zero. +// +// The MachineMove class is used to represent abstract move operations in the +// prolog/epilog of a compiled function. A collection of these objects can be +// used by a debug consumer to track the location of values when unwinding stack +// frames. +//===----------------------------------------------------------------------===// + + +#ifndef LLVM_CODEGEN_MACHINELOCATION_H +#define LLVM_CODEGEN_MACHINELOCATION_H + +namespace llvm { + +class MachineLocation { +private: + bool IsRegister; // True if location is a register. + unsigned Register; // gcc/gdb register number. + int Offset; // Displacement if not register. + +public: + enum { + // The target register number for an abstract frame pointer. The value is + // an arbitrary value greater than TargetRegisterInfo::FirstVirtualRegister. + VirtualFP = ~0U + }; + MachineLocation() + : IsRegister(false) + , Register(0) + , Offset(0) + {} + explicit MachineLocation(unsigned R) + : IsRegister(true) + , Register(R) + , Offset(0) + {} + MachineLocation(unsigned R, int O) + : IsRegister(false) + , Register(R) + , Offset(O) + {} + + // Accessors + bool isReg() const { return IsRegister; } + unsigned getReg() const { return Register; } + int getOffset() const { return Offset; } + void setIsRegister(bool Is) { IsRegister = Is; } + void setRegister(unsigned R) { Register = R; } + void setOffset(int O) { Offset = O; } + void set(unsigned R) { + IsRegister = true; + Register = R; + Offset = 0; + } + void set(unsigned R, int O) { + IsRegister = false; + Register = R; + Offset = O; + } + +#ifndef NDEBUG + void dump(); +#endif +}; + +class MachineMove { +private: + unsigned LabelID; // Label ID number for post-instruction + // address when result of move takes + // effect. + MachineLocation Destination; // Move to location. + MachineLocation Source; // Move from location. 
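The two MachineLocation forms described above look like this in practice; FramePtrReg stands for some hypothetical target frame-pointer register number.

#include "llvm/CodeGen/MachineLocation.h"
using namespace llvm;

void describeLocations(unsigned FramePtrReg) {
  MachineLocation InReg(FramePtrReg);        // value lives in a register
  MachineLocation OnStack(FramePtrReg, -8);  // value lives at [FramePtrReg - 8]
  MachineLocation Abstract(MachineLocation::VirtualFP, 16); // offset from the abstract frame pointer
  bool InRegister = InReg.isReg();           // true; OnStack.isReg() is false, OnStack.getOffset() is -8
  (void)InRegister; (void)OnStack; (void)Abstract;
}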
+ +public: + MachineMove() + : LabelID(0) + , Destination() + , Source() + {} + + MachineMove(unsigned ID, MachineLocation &D, MachineLocation &S) + : LabelID(ID) + , Destination(D) + , Source(S) + {} + + // Accessors + unsigned getLabelID() const { return LabelID; } + const MachineLocation &getDestination() const { return Destination; } + const MachineLocation &getSource() const { return Source; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineLoopInfo.h b/include/llvm/CodeGen/MachineLoopInfo.h new file mode 100644 index 0000000..8c96308 --- /dev/null +++ b/include/llvm/CodeGen/MachineLoopInfo.h @@ -0,0 +1,188 @@ +//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MachineLoopInfo class that is used to identify natural +// loops and determine the loop depth of various nodes of the CFG. Note that +// natural loops may actually be several loops that share the same header node. +// +// This analysis calculates the nesting structure of loops in a function. For +// each natural loop identified, this analysis identifies natural loops +// contained entirely within the loop and the basic blocks the make up the loop. +// +// It can calculate on the fly various bits of information, for example: +// +// * whether there is a preheader for the loop +// * the number of back edges to the header +// * whether or not a particular block branches out of the loop +// * the successor blocks of the loop +// * the loop depth +// * the trip count +// * etc... +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINE_LOOP_INFO_H +#define LLVM_CODEGEN_MACHINE_LOOP_INFO_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Analysis/LoopInfo.h" + +namespace llvm { + +// Provide overrides for Loop methods that don't make sense for machine loops. 
+template<> inline +PHINode *LoopBase<MachineBasicBlock>::getCanonicalInductionVariable() const { + assert(0 && "getCanonicalInductionVariable not supported for machine loops!"); + return 0; +} + +template<> inline Instruction* +LoopBase<MachineBasicBlock>::getCanonicalInductionVariableIncrement() const { + assert(0 && + "getCanonicalInductionVariableIncrement not supported for machine loops!"); + return 0; +} + +template<> +inline bool LoopBase<MachineBasicBlock>::isLoopInvariant(Value *V) const { + assert(0 && "isLoopInvariant not supported for machine loops!"); + return false; +} + +template<> +inline Value *LoopBase<MachineBasicBlock>::getTripCount() const { + assert(0 && "getTripCount not supported for machine loops!"); + return 0; +} + +template<> +inline bool LoopBase<MachineBasicBlock>::isLCSSAForm() const { + assert(0 && "isLCSSAForm not supported for machine loops"); + return false; +} + +typedef LoopBase<MachineBasicBlock> MachineLoop; + +class MachineLoopInfo : public MachineFunctionPass { + LoopInfoBase<MachineBasicBlock>* LI; + friend class LoopBase<MachineBasicBlock>; + + LoopInfoBase<MachineBasicBlock>& getBase() { return *LI; } +public: + static char ID; // Pass identification, replacement for typeid + + MachineLoopInfo() : MachineFunctionPass(&ID) { + LI = new LoopInfoBase<MachineBasicBlock>(); + } + + ~MachineLoopInfo() { delete LI; } + + /// iterator/begin/end - The interface to the top-level loops in the current + /// function. + /// + typedef std::vector<MachineLoop*>::const_iterator iterator; + inline iterator begin() const { return LI->begin(); } + inline iterator end() const { return LI->end(); } + bool empty() const { return LI->empty(); } + + /// getLoopFor - Return the inner most loop that BB lives in. If a basic + /// block is in no loop (for example the entry node), null is returned. + /// + inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const { + return LI->getLoopFor(BB); + } + + /// operator[] - same as getLoopFor... + /// + inline const MachineLoop *operator[](const MachineBasicBlock *BB) const { + return LI->getLoopFor(BB); + } + + /// getLoopDepth - Return the loop nesting level of the specified block... + /// + inline unsigned getLoopDepth(const MachineBasicBlock *BB) const { + return LI->getLoopDepth(BB); + } + + // isLoopHeader - True if the block is a loop header node + inline bool isLoopHeader(MachineBasicBlock *BB) const { + return LI->isLoopHeader(BB); + } + + /// runOnFunction - Calculate the natural loop information. + /// + virtual bool runOnMachineFunction(MachineFunction &F); + + virtual void releaseMemory() { LI->releaseMemory(); } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + /// removeLoop - This removes the specified top-level loop from this loop info + /// object. The loop is not deleted, as it will presumably be inserted into + /// another loop. + inline MachineLoop *removeLoop(iterator I) { return LI->removeLoop(I); } + + /// changeLoopFor - Change the top-level loop that contains BB to the + /// specified loop. This should be used by transformations that restructure + /// the loop hierarchy tree. + inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) { + LI->changeLoopFor(BB, L); + } + + /// changeTopLevelLoop - Replace the specified loop in the top-level loops + /// list with the indicated loop. 
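A sketch of how another machine-function pass might consume this analysis; it assumes the usual addRequired/getAnalysis pass-manager plumbing, which is declared elsewhere, and the pass itself is invented for illustration.

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
using namespace llvm;

namespace {
  // Hypothetical pass that only inspects loop structure.
  struct LoopDepthStats : public MachineFunctionPass {
    static char ID;
    LoopDepthStats() : MachineFunctionPass(&ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineLoopInfo>();    // run MachineLoopInfo first
      AU.setPreservesAll();
    }

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
      for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
        if (MLI.isLoopHeader(&*BB))
          (void)MLI.getLoopDepth(&*BB);     // nesting depth of this loop header
      return false;                         // the function was not modified
    }
  };
  char LoopDepthStats::ID = 0;
}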
+ inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) { + LI->changeTopLevelLoop(OldLoop, NewLoop); + } + + /// addTopLevelLoop - This adds the specified loop to the collection of + /// top-level loops. + inline void addTopLevelLoop(MachineLoop *New) { + LI->addTopLevelLoop(New); + } + + /// removeBlock - This method completely removes BB from all data structures, + /// including all of the Loop objects it is nested in and our mapping from + /// MachineBasicBlocks to loops. + void removeBlock(MachineBasicBlock *BB) { + LI->removeBlock(BB); + } +}; + + +// Allow clients to walk the list of nested loops... +template <> struct GraphTraits<const MachineLoop*> { + typedef const MachineLoop NodeType; + typedef std::vector<MachineLoop*>::const_iterator ChildIteratorType; + + static NodeType *getEntryNode(const MachineLoop *L) { return L; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->end(); + } +}; + +template <> struct GraphTraits<MachineLoop*> { + typedef MachineLoop NodeType; + typedef std::vector<MachineLoop*>::const_iterator ChildIteratorType; + + static NodeType *getEntryNode(MachineLoop *L) { return L; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->end(); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h new file mode 100644 index 0000000..4388c0a --- /dev/null +++ b/include/llvm/CodeGen/MachineMemOperand.h @@ -0,0 +1,86 @@ +//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the MachineMemOperand class, which is a +// description of a memory reference. It is used to help track dependencies +// in the backend. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H +#define LLVM_CODEGEN_MACHINEMEMOPERAND_H + +namespace llvm { + +class Value; +class FoldingSetNodeID; + +//===----------------------------------------------------------------------===// +/// MachineMemOperand - A description of a memory reference used in the backend. +/// Instead of holding a StoreInst or LoadInst, this class holds the address +/// Value of the reference along with a byte size and offset. This allows it +/// to describe lowered loads and stores. Also, the special PseudoSourceValue +/// objects can be used to represent loads and stores to memory locations +/// that aren't explicit in the regular LLVM IR. +/// +class MachineMemOperand { + int64_t Offset; + uint64_t Size; + const Value *V; + unsigned int Flags; + +public: + /// Flags values. These may be or'd together. + enum MemOperandFlags { + /// The memory access reads data. + MOLoad = 1, + /// The memory access writes data. + MOStore = 2, + /// The memory access is volatile. + MOVolatile = 4 + }; + + /// MachineMemOperand - Construct an MachineMemOperand object with the + /// specified address Value, flags, offset, size, and alignment. 
+ MachineMemOperand(const Value *v, unsigned int f, int64_t o, uint64_t s, + unsigned int a); + + /// getValue - Return the base address of the memory access. + /// Special values are PseudoSourceValue::FPRel, PseudoSourceValue::SPRel, + /// and the other PseudoSourceValue members which indicate references to + /// frame/stack pointer relative references and other special references. + const Value *getValue() const { return V; } + + /// getFlags - Return the raw flags of the source value, \see MemOperandFlags. + unsigned int getFlags() const { return Flags & 7; } + + /// getOffset - For normal values, this is a byte offset added to the base + /// address. For PseudoSourceValue::FPRel values, this is the FrameIndex + /// number. + int64_t getOffset() const { return Offset; } + + /// getSize - Return the size in bytes of the memory reference. + uint64_t getSize() const { return Size; } + + /// getAlignment - Return the minimum known alignment in bytes of the + /// memory reference. + unsigned int getAlignment() const { return (1u << (Flags >> 3)) >> 1; } + + bool isLoad() const { return Flags & MOLoad; } + bool isStore() const { return Flags & MOStore; } + bool isVolatile() const { return Flags & MOVolatile; } + + /// Profile - Gather unique data for the object. + /// + void Profile(FoldingSetNodeID &ID) const; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h new file mode 100644 index 0000000..1872bd2 --- /dev/null +++ b/include/llvm/CodeGen/MachineModuleInfo.h @@ -0,0 +1,300 @@ +//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Collect meta information for a module. This information should be in a +// neutral form that can be used by different debugging and exception handling +// schemes. +// +// The organization of information is primarily clustered around the source +// compile units. The main exception is source line correspondence where +// inlining may interleave code from various compile units. +// +// The following information can be retrieved from the MachineModuleInfo. +// +// -- Source directories - Directories are uniqued based on their canonical +// string and assigned a sequential numeric ID (base 1.) +// -- Source files - Files are also uniqued based on their name and directory +// ID. A file ID is sequential number (base 1.) +// -- Source line correspondence - A vector of file ID, line#, column# triples. +// A DEBUG_LOCATION instruction is generated by the DAG Legalizer +// corresponding to each entry in the source line list. This allows a debug +// emitter to generate labels referenced by debug information tables. 
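A sketch of attaching a memory reference to an instruction built with MachineInstrBuilder; the instruction is assumed to load 4 bytes, 4-byte aligned, from the address held in Ptr.

#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

void attachLoadInfo(MachineInstrBuilder LoadMI, const Value *Ptr) {
  // Offset 0, size 4 bytes, alignment 4 bytes.
  MachineMemOperand MMO(Ptr, MachineMemOperand::MOLoad, 0, 4, 4);
  LoadMI.addMemOperand(MMO);  // copied into the instruction's memory-operand list
}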
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H +#define LLVM_CODEGEN_MACHINEMODULEINFO_H + +#include "llvm/Support/Dwarf.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/UniqueVector.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/CodeGen/MachineLocation.h" +#include "llvm/GlobalValue.h" +#include "llvm/Pass.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Forward declarations. +class Constant; +class GlobalVariable; +class MachineBasicBlock; +class MachineFunction; +class Module; +class PointerType; +class StructType; + +//===----------------------------------------------------------------------===// +/// LandingPadInfo - This structure is used to retain landing pad info for +/// the current function. +/// +struct LandingPadInfo { + MachineBasicBlock *LandingPadBlock; // Landing pad block. + SmallVector<unsigned, 1> BeginLabels; // Labels prior to invoke. + SmallVector<unsigned, 1> EndLabels; // Labels after invoke. + unsigned LandingPadLabel; // Label at beginning of landing pad. + Function *Personality; // Personality function. + std::vector<int> TypeIds; // List of type ids (filters negative) + + explicit LandingPadInfo(MachineBasicBlock *MBB) + : LandingPadBlock(MBB) + , LandingPadLabel(0) + , Personality(NULL) + {} +}; + +//===----------------------------------------------------------------------===// +/// MachineModuleInfo - This class contains meta information specific to a +/// module. Queries can be made by different debugging and exception handling +/// schemes and reformated for specific use. +/// +class MachineModuleInfo : public ImmutablePass { +private: + // LabelIDList - One entry per assigned label. Normally the entry is equal to + // the list index(+1). If the entry is zero then the label has been deleted. + // Any other value indicates the label has been deleted by is mapped to + // another label. + std::vector<unsigned> LabelIDList; + + // FrameMoves - List of moves done by a function's prolog. Used to construct + // frame maps by debug and exception handling consumers. + std::vector<MachineMove> FrameMoves; + + // LandingPads - List of LandingPadInfo describing the landing pad information + // in the current function. + std::vector<LandingPadInfo> LandingPads; + + // TypeInfos - List of C++ TypeInfo used in the current function. + // + std::vector<GlobalVariable *> TypeInfos; + + // FilterIds - List of typeids encoding filters used in the current function. + // + std::vector<unsigned> FilterIds; + + // FilterEnds - List of the indices in FilterIds corresponding to filter + // terminators. + // + std::vector<unsigned> FilterEnds; + + // Personalities - Vector of all personality functions ever seen. Used to emit + // common EH frames. + std::vector<Function *> Personalities; + + // UsedFunctions - the functions in the llvm.used list in a more easily + // searchable format. + SmallPtrSet<const Function *, 32> UsedFunctions; + + /// UsedDbgLabels - labels are used by debug info entries. + SmallSet<unsigned, 8> UsedDbgLabels; + + bool CallsEHReturn; + bool CallsUnwindInit; + + /// DbgInfoAvailable - True if debugging information is available + /// in this module. 
+ bool DbgInfoAvailable; +public: + static char ID; // Pass identification, replacement for typeid + + MachineModuleInfo(); + ~MachineModuleInfo(); + + /// doInitialization - Initialize the state for a new module. + /// + bool doInitialization(); + + /// doFinalization - Tear down the state after completion of a module. + /// + bool doFinalization(); + + /// BeginFunction - Begin gathering function meta information. + /// + void BeginFunction(MachineFunction *MF); + + /// EndFunction - Discard function meta information. + /// + void EndFunction(); + + /// AnalyzeModule - Scan the module for global debug information. + /// + void AnalyzeModule(Module &M); + + /// hasDebugInfo - Returns true if valid debug info is present. + /// + bool hasDebugInfo() const { return DbgInfoAvailable; } + void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = true; } + + bool callsEHReturn() const { return CallsEHReturn; } + void setCallsEHReturn(bool b) { CallsEHReturn = b; } + + bool callsUnwindInit() const { return CallsUnwindInit; } + void setCallsUnwindInit(bool b) { CallsUnwindInit = b; } + + /// NextLabelID - Return the next unique label id. + /// + unsigned NextLabelID() { + unsigned ID = (unsigned)LabelIDList.size() + 1; + LabelIDList.push_back(ID); + return ID; + } + + /// InvalidateLabel - Inhibit use of the specified label # from + /// MachineModuleInfo, for example because the code was deleted. + void InvalidateLabel(unsigned LabelID) { + // Remap to zero to indicate deletion. + RemapLabel(LabelID, 0); + } + + /// RemapLabel - Indicate that a label has been merged into another. + /// + void RemapLabel(unsigned OldLabelID, unsigned NewLabelID) { + assert(0 < OldLabelID && OldLabelID <= LabelIDList.size() && + "Old label ID out of range."); + assert(NewLabelID <= LabelIDList.size() && + "New label ID out of range."); + LabelIDList[OldLabelID - 1] = NewLabelID; + } + + /// MappedLabel - Find out the label's final ID. Zero indicates deletion. + /// ID != Mapped ID indicates that the label was folded into another label. + unsigned MappedLabel(unsigned LabelID) const { + assert(LabelID <= LabelIDList.size() && "Debug label ID out of range."); + return LabelID ? LabelIDList[LabelID - 1] : 0; + } + + /// isDbgLabelUsed - Return true if label with LabelID is used by + /// DwarfWriter. + bool isDbgLabelUsed(unsigned LabelID) { + return UsedDbgLabels.count(LabelID); + } + + /// RecordUsedDbgLabel - Mark label with LabelID as used. This is used + /// by DwarfWriter to inform DebugLabelFolder that certain labels are + /// not to be deleted. + void RecordUsedDbgLabel(unsigned LabelID) { + UsedDbgLabels.insert(LabelID); + } + + /// getFrameMoves - Returns a reference to a list of moves done in the current + /// function's prologue. Used to construct frame maps for debug and exception + /// handling comsumers. + std::vector<MachineMove> &getFrameMoves() { return FrameMoves; } + + //===-EH-----------------------------------------------------------------===// + + /// getOrCreateLandingPadInfo - Find or create an LandingPadInfo for the + /// specified MachineBasicBlock. + LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad); + + /// addInvoke - Provide the begin and end labels of an invoke style call and + /// associate it with a try landing pad block. + void addInvoke(MachineBasicBlock *LandingPad, unsigned BeginLabel, + unsigned EndLabel); + + /// addLandingPad - Add a new panding pad. Returns the label ID for the + /// landing pad entry. 
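A sketch of the frame-move bookkeeping a target's prologue emitter typically performs with NextLabelID and getFrameMoves; FrameReg, Offset and SrcReg are hypothetical target registers and stack offsets.

#include "llvm/CodeGen/MachineModuleInfo.h"
using namespace llvm;

// Record, for debug/EH consumers, that after the point marked by LabelID the
// value formerly held in SrcReg can be found at [FrameReg + Offset].
void recordSpillMove(MachineModuleInfo &MMI, unsigned FrameReg, int Offset,
                     unsigned SrcReg) {
  unsigned LabelID = MMI.NextLabelID();   // label the emitter will place at this point
  MachineLocation Dst(FrameReg, Offset);  // base + offset form
  MachineLocation Src(SrcReg);            // register form
  MMI.getFrameMoves().push_back(MachineMove(LabelID, Dst, Src));
}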
+ unsigned addLandingPad(MachineBasicBlock *LandingPad); + + /// addPersonality - Provide the personality function for the exception + /// information. + void addPersonality(MachineBasicBlock *LandingPad, Function *Personality); + + /// getPersonalityIndex - Get index of the current personality function inside + /// Personalitites array + unsigned getPersonalityIndex() const; + + /// getPersonalities - Return array of personality functions ever seen. + const std::vector<Function *>& getPersonalities() const { + return Personalities; + } + + // UsedFunctions - Return set of the functions in the llvm.used list. + const SmallPtrSet<const Function *, 32>& getUsedFunctions() const { + return UsedFunctions; + } + + /// addCatchTypeInfo - Provide the catch typeinfo for a landing pad. + /// + void addCatchTypeInfo(MachineBasicBlock *LandingPad, + std::vector<GlobalVariable *> &TyInfo); + + /// addFilterTypeInfo - Provide the filter typeinfo for a landing pad. + /// + void addFilterTypeInfo(MachineBasicBlock *LandingPad, + std::vector<GlobalVariable *> &TyInfo); + + /// addCleanup - Add a cleanup action for a landing pad. + /// + void addCleanup(MachineBasicBlock *LandingPad); + + /// getTypeIDFor - Return the type id for the specified typeinfo. This is + /// function wide. + unsigned getTypeIDFor(GlobalVariable *TI); + + /// getFilterIDFor - Return the id of the filter encoded by TyIds. This is + /// function wide. + int getFilterIDFor(std::vector<unsigned> &TyIds); + + /// TidyLandingPads - Remap landing pad labels and remove any deleted landing + /// pads. + void TidyLandingPads(); + + /// getLandingPads - Return a reference to the landing pad info for the + /// current function. + const std::vector<LandingPadInfo> &getLandingPads() const { + return LandingPads; + } + + /// getTypeInfos - Return a reference to the C++ typeinfo for the current + /// function. + const std::vector<GlobalVariable *> &getTypeInfos() const { + return TypeInfos; + } + + /// getFilterIds - Return a reference to the typeids encoding filters used in + /// the current function. + const std::vector<unsigned> &getFilterIds() const { + return FilterIds; + } + + /// getPersonality - Return a personality function if available. The presence + /// of one is required to emit exception handling info. + Function *getPersonality() const; + +}; // End class MachineModuleInfo + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h new file mode 100644 index 0000000..7a41684 --- /dev/null +++ b/include/llvm/CodeGen/MachineOperand.h @@ -0,0 +1,447 @@ +//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the MachineOperand class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEOPERAND_H +#define LLVM_CODEGEN_MACHINEOPERAND_H + +#include "llvm/Support/DataTypes.h" +#include <cassert> +#include <iosfwd> + +namespace llvm { + +class ConstantFP; +class MachineBasicBlock; +class GlobalValue; +class MachineInstr; +class TargetMachine; +class MachineRegisterInfo; +class raw_ostream; + +/// MachineOperand class - Representation of each machine instruction operand. 
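A sketch of how invoke lowering might feed these exception-handling tables, assuming Pad is the landing-pad block, Personality the personality routine, and CatchTy the typeinfo global of a single catch clause.

#include "llvm/CodeGen/MachineModuleInfo.h"
#include <vector>
using namespace llvm;

void recordInvoke(MachineModuleInfo &MMI, MachineBasicBlock *Pad,
                  Function *Personality, GlobalVariable *CatchTy) {
  unsigned BeginLabel = MMI.NextLabelID();   // emitted just before the call
  unsigned EndLabel   = MMI.NextLabelID();   // emitted just after the call
  MMI.addInvoke(Pad, BeginLabel, EndLabel);  // tie the call range to its landing pad

  MMI.addLandingPad(Pad);                    // label placed at the pad itself
  MMI.addPersonality(Pad, Personality);

  std::vector<GlobalVariable*> TyInfo(1, CatchTy);
  MMI.addCatchTypeInfo(Pad, TyInfo);         // one catch clause, for CatchTy
}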
+/// +class MachineOperand { +public: + enum MachineOperandType { + MO_Register, ///< Register operand. + MO_Immediate, ///< Immediate operand + MO_FPImmediate, ///< Floating-point immediate operand + MO_MachineBasicBlock, ///< MachineBasicBlock reference + MO_FrameIndex, ///< Abstract Stack Frame Index + MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool + MO_JumpTableIndex, ///< Address of indexed Jump Table for switch + MO_ExternalSymbol, ///< Name of external global symbol + MO_GlobalAddress ///< Address of a global value + }; + +private: + /// OpKind - Specify what kind of operand this is. This discriminates the + /// union. + MachineOperandType OpKind : 8; + + /// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register + /// operands. + + /// IsDef - True if this is a def, false if this is a use of the register. + /// + bool IsDef : 1; + + /// IsImp - True if this is an implicit def or use, false if it is explicit. + /// + bool IsImp : 1; + + /// IsKill - True if this instruction is the last use of the register on this + /// path through the function. This is only valid on uses of registers. + bool IsKill : 1; + + /// IsDead - True if this register is never used by a subsequent instruction. + /// This is only valid on definitions of registers. + bool IsDead : 1; + + /// IsEarlyClobber - True if this MO_Register 'def' operand is written to + /// by the MachineInstr before all input registers are read. This is used to + /// model the GCC inline asm '&' constraint modifier. + bool IsEarlyClobber : 1; + + /// SubReg - Subregister number, only valid for MO_Register. A value of 0 + /// indicates the MO_Register has no subReg. + unsigned char SubReg; + + /// ParentMI - This is the instruction that this operand is embedded into. + /// This is valid for all operand types, when the operand is in an instr. + MachineInstr *ParentMI; + + /// Contents union - This contains the payload for the various operand types. + union { + MachineBasicBlock *MBB; // For MO_MachineBasicBlock. + const ConstantFP *CFP; // For MO_FPImmediate. + int64_t ImmVal; // For MO_Immediate. + + struct { // For MO_Register. + unsigned RegNo; + MachineOperand **Prev; // Access list for register. + MachineOperand *Next; + } Reg; + + /// OffsetedInfo - This struct contains the offset and an object identifier. + /// this represent the object as with an optional offset from it. + struct { + union { + int Index; // For MO_*Index - The index itself. + const char *SymbolName; // For MO_ExternalSymbol. + GlobalValue *GV; // For MO_GlobalAddress. + } Val; + int64_t Offset; // An offset from the object. + } OffsetedInfo; + } Contents; + + explicit MachineOperand(MachineOperandType K) : OpKind(K), ParentMI(0) {} +public: + MachineOperand(const MachineOperand &M) { + *this = M; + } + + ~MachineOperand() {} + + /// getType - Returns the MachineOperandType for this operand. + /// + MachineOperandType getType() const { return OpKind; } + + /// getParent - Return the instruction that this operand belongs to. + /// + MachineInstr *getParent() { return ParentMI; } + const MachineInstr *getParent() const { return ParentMI; } + + void print(std::ostream &os, const TargetMachine *TM = 0) const; + void print(raw_ostream &os, const TargetMachine *TM = 0) const; + + //===--------------------------------------------------------------------===// + // Accessors that tell you what kind of MachineOperand you're looking at. 
+ //===--------------------------------------------------------------------===// + + /// isReg - Tests if this is a MO_Register operand. + bool isReg() const { return OpKind == MO_Register; } + /// isImm - Tests if this is a MO_Immediate operand. + bool isImm() const { return OpKind == MO_Immediate; } + /// isFPImm - Tests if this is a MO_FPImmediate operand. + bool isFPImm() const { return OpKind == MO_FPImmediate; } + /// isMBB - Tests if this is a MO_MachineBasicBlock operand. + bool isMBB() const { return OpKind == MO_MachineBasicBlock; } + /// isFI - Tests if this is a MO_FrameIndex operand. + bool isFI() const { return OpKind == MO_FrameIndex; } + /// isCPI - Tests if this is a MO_ConstantPoolIndex operand. + bool isCPI() const { return OpKind == MO_ConstantPoolIndex; } + /// isJTI - Tests if this is a MO_JumpTableIndex operand. + bool isJTI() const { return OpKind == MO_JumpTableIndex; } + /// isGlobal - Tests if this is a MO_GlobalAddress operand. + bool isGlobal() const { return OpKind == MO_GlobalAddress; } + /// isSymbol - Tests if this is a MO_ExternalSymbol operand. + bool isSymbol() const { return OpKind == MO_ExternalSymbol; } + + //===--------------------------------------------------------------------===// + // Accessors for Register Operands + //===--------------------------------------------------------------------===// + + /// getReg - Returns the register number. + unsigned getReg() const { + assert(isReg() && "This is not a register operand!"); + return Contents.Reg.RegNo; + } + + unsigned getSubReg() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return (unsigned)SubReg; + } + + bool isUse() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return !IsDef; + } + + bool isDef() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return IsDef; + } + + bool isImplicit() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return IsImp; + } + + bool isDead() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return IsDead; + } + + bool isKill() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return IsKill; + } + + bool isEarlyClobber() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return IsEarlyClobber; + } + + /// getNextOperandForReg - Return the next MachineOperand in the function that + /// uses or defines this register. + MachineOperand *getNextOperandForReg() const { + assert(isReg() && "This is not a register operand!"); + return Contents.Reg.Next; + } + + //===--------------------------------------------------------------------===// + // Mutators for Register Operands + //===--------------------------------------------------------------------===// + + /// Change the register this operand corresponds to. 
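The predicates above are typically used to dispatch over an instruction's operands; getNumOperands()/getOperand() are assumed to be the usual MachineInstr accessors, which are declared elsewhere.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
using namespace llvm;

// Count register defs, register uses and immediates in one instruction.
void summarizeOperands(const MachineInstr &MI, unsigned &RegDefs,
                       unsigned &RegUses, unsigned &Imms) {
  RegDefs = RegUses = Imms = 0;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) {
      if (MO.isDef()) ++RegDefs;   // isDef/isKill/isDead only apply to register operands
      else            ++RegUses;
    } else if (MO.isImm()) {
      ++Imms;
    }
    // isFI(), isMBB(), isGlobal(), isSymbol(), ... discriminate the remaining kinds.
  }
}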
+ /// + void setReg(unsigned Reg); + + void setSubReg(unsigned subReg) { + assert(isReg() && "Wrong MachineOperand accessor"); + SubReg = (unsigned char)subReg; + } + + void setIsUse(bool Val = true) { + assert(isReg() && "Wrong MachineOperand accessor"); + IsDef = !Val; + } + + void setIsDef(bool Val = true) { + assert(isReg() && "Wrong MachineOperand accessor"); + IsDef = Val; + } + + void setImplicit(bool Val = true) { + assert(isReg() && "Wrong MachineOperand accessor"); + IsImp = Val; + } + + void setIsKill(bool Val = true) { + assert(isReg() && !IsDef && "Wrong MachineOperand accessor"); + IsKill = Val; + } + + void setIsDead(bool Val = true) { + assert(isReg() && IsDef && "Wrong MachineOperand accessor"); + IsDead = Val; + } + + void setIsEarlyClobber(bool Val = true) { + assert(isReg() && IsDef && "Wrong MachineOperand accessor"); + IsEarlyClobber = Val; + } + + //===--------------------------------------------------------------------===// + // Accessors for various operand types. + //===--------------------------------------------------------------------===// + + int64_t getImm() const { + assert(isImm() && "Wrong MachineOperand accessor"); + return Contents.ImmVal; + } + + const ConstantFP *getFPImm() const { + assert(isFPImm() && "Wrong MachineOperand accessor"); + return Contents.CFP; + } + + MachineBasicBlock *getMBB() const { + assert(isMBB() && "Wrong MachineOperand accessor"); + return Contents.MBB; + } + + int getIndex() const { + assert((isFI() || isCPI() || isJTI()) && + "Wrong MachineOperand accessor"); + return Contents.OffsetedInfo.Val.Index; + } + + GlobalValue *getGlobal() const { + assert(isGlobal() && "Wrong MachineOperand accessor"); + return Contents.OffsetedInfo.Val.GV; + } + + int64_t getOffset() const { + assert((isGlobal() || isSymbol() || isCPI()) && + "Wrong MachineOperand accessor"); + return Contents.OffsetedInfo.Offset; + } + + const char *getSymbolName() const { + assert(isSymbol() && "Wrong MachineOperand accessor"); + return Contents.OffsetedInfo.Val.SymbolName; + } + + //===--------------------------------------------------------------------===// + // Mutators for various operand types. + //===--------------------------------------------------------------------===// + + void setImm(int64_t immVal) { + assert(isImm() && "Wrong MachineOperand mutator"); + Contents.ImmVal = immVal; + } + + void setOffset(int64_t Offset) { + assert((isGlobal() || isSymbol() || isCPI()) && + "Wrong MachineOperand accessor"); + Contents.OffsetedInfo.Offset = Offset; + } + + void setIndex(int Idx) { + assert((isFI() || isCPI() || isJTI()) && + "Wrong MachineOperand accessor"); + Contents.OffsetedInfo.Val.Index = Idx; + } + + void setMBB(MachineBasicBlock *MBB) { + assert(isMBB() && "Wrong MachineOperand accessor"); + Contents.MBB = MBB; + } + + //===--------------------------------------------------------------------===// + // Other methods. + //===--------------------------------------------------------------------===// + + /// isIdenticalTo - Return true if this operand is identical to the specified + /// operand. Note: This method ignores isKill and isDead properties. + bool isIdenticalTo(const MachineOperand &Other) const; + + /// ChangeToImmediate - Replace this operand with a new immediate operand of + /// the specified value. If an operand is known to be an immediate already, + /// the setImm method should be used. + void ChangeToImmediate(int64_t ImmVal); + + /// ChangeToRegister - Replace this operand with a new register operand of + /// the specified value. 
If an operand is known to be an register already, + /// the setReg method should be used. + void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false, + bool isKill = false, bool isDead = false); + + //===--------------------------------------------------------------------===// + // Construction methods. + //===--------------------------------------------------------------------===// + + static MachineOperand CreateImm(int64_t Val) { + MachineOperand Op(MachineOperand::MO_Immediate); + Op.setImm(Val); + return Op; + } + + static MachineOperand CreateFPImm(const ConstantFP *CFP) { + MachineOperand Op(MachineOperand::MO_FPImmediate); + Op.Contents.CFP = CFP; + return Op; + } + + static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false, + bool isKill = false, bool isDead = false, + unsigned SubReg = 0, + bool isEarlyClobber = false) { + MachineOperand Op(MachineOperand::MO_Register); + Op.IsDef = isDef; + Op.IsImp = isImp; + Op.IsKill = isKill; + Op.IsDead = isDead; + Op.IsEarlyClobber = isEarlyClobber; + Op.Contents.Reg.RegNo = Reg; + Op.Contents.Reg.Prev = 0; + Op.Contents.Reg.Next = 0; + Op.SubReg = SubReg; + return Op; + } + static MachineOperand CreateMBB(MachineBasicBlock *MBB) { + MachineOperand Op(MachineOperand::MO_MachineBasicBlock); + Op.setMBB(MBB); + return Op; + } + static MachineOperand CreateFI(unsigned Idx) { + MachineOperand Op(MachineOperand::MO_FrameIndex); + Op.setIndex(Idx); + return Op; + } + static MachineOperand CreateCPI(unsigned Idx, int Offset) { + MachineOperand Op(MachineOperand::MO_ConstantPoolIndex); + Op.setIndex(Idx); + Op.setOffset(Offset); + return Op; + } + static MachineOperand CreateJTI(unsigned Idx) { + MachineOperand Op(MachineOperand::MO_JumpTableIndex); + Op.setIndex(Idx); + return Op; + } + static MachineOperand CreateGA(GlobalValue *GV, int64_t Offset) { + MachineOperand Op(MachineOperand::MO_GlobalAddress); + Op.Contents.OffsetedInfo.Val.GV = GV; + Op.setOffset(Offset); + return Op; + } + static MachineOperand CreateES(const char *SymName, int64_t Offset = 0) { + MachineOperand Op(MachineOperand::MO_ExternalSymbol); + Op.Contents.OffsetedInfo.Val.SymbolName = SymName; + Op.setOffset(Offset); + return Op; + } + const MachineOperand &operator=(const MachineOperand &MO) { + OpKind = MO.OpKind; + IsDef = MO.IsDef; + IsImp = MO.IsImp; + IsKill = MO.IsKill; + IsDead = MO.IsDead; + IsEarlyClobber = MO.IsEarlyClobber; + SubReg = MO.SubReg; + ParentMI = MO.ParentMI; + Contents = MO.Contents; + return *this; + } + + friend class MachineInstr; + friend class MachineRegisterInfo; +private: + //===--------------------------------------------------------------------===// + // Methods for handling register use/def lists. + //===--------------------------------------------------------------------===// + + /// isOnRegUseList - Return true if this operand is on a register use/def list + /// or false if not. This can only be called for register operands that are + /// part of a machine instruction. + bool isOnRegUseList() const { + assert(isReg() && "Can only add reg operand to use lists"); + return Contents.Reg.Prev != 0; + } + + /// AddRegOperandToRegInfo - Add this register operand to the specified + /// MachineRegisterInfo. If it is null, then the next/prev fields should be + /// explicitly nulled out. + void AddRegOperandToRegInfo(MachineRegisterInfo *RegInfo); + + /// RemoveRegOperandFromRegInfo - Remove this register operand from the + /// MachineRegisterInfo it is linked with. 
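The construction methods above can also be used directly, without MachineInstrBuilder, when operands have to be appended by hand; MI is assumed to be an instruction whose description accepts these operands.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
using namespace llvm;

void appendOperands(MachineInstr *MI, unsigned DefReg, GlobalValue *GV) {
  MI->addOperand(MachineOperand::CreateReg(DefReg, /*isDef=*/true));
  MI->addOperand(MachineOperand::CreateGA(GV, /*Offset=*/0));
  MI->addOperand(MachineOperand::CreateImm(42));
}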
+ void RemoveRegOperandFromRegInfo(); +}; + +inline std::ostream &operator<<(std::ostream &OS, const MachineOperand &MO) { + MO.print(OS, 0); + return OS; +} + +inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) { + MO.print(OS, 0); + return OS; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachinePassRegistry.h b/include/llvm/CodeGen/MachinePassRegistry.h new file mode 100644 index 0000000..680d2b8 --- /dev/null +++ b/include/llvm/CodeGen/MachinePassRegistry.h @@ -0,0 +1,156 @@ +//===-- llvm/CodeGen/MachinePassRegistry.h ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the mechanics for machine function pass registries. A +// function pass registry (MachinePassRegistry) is auto filled by the static +// constructors of MachinePassRegistryNode. Further there is a command line +// parser (RegisterPassParser) which listens to each registry for additions +// and deletions, so that the appropriate command option is updated. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H +#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H + +#include "llvm/CodeGen/Passes.h" +#include "llvm/Support/CommandLine.h" + +namespace llvm { + +typedef void *(*MachinePassCtor)(); + + +//===----------------------------------------------------------------------===// +/// +/// MachinePassRegistryListener - Listener to adds and removals of nodes in +/// registration list. +/// +//===----------------------------------------------------------------------===// +class MachinePassRegistryListener { +public: + MachinePassRegistryListener() {} + virtual ~MachinePassRegistryListener() {} + virtual void NotifyAdd(const char *N, MachinePassCtor C, const char *D) = 0; + virtual void NotifyRemove(const char *N) = 0; +}; + + +//===----------------------------------------------------------------------===// +/// +/// MachinePassRegistryNode - Machine pass node stored in registration list. +/// +//===----------------------------------------------------------------------===// +class MachinePassRegistryNode { + +private: + + MachinePassRegistryNode *Next; // Next function pass in list. + const char *Name; // Name of function pass. + const char *Description; // Description string. + MachinePassCtor Ctor; // Function pass creator. + +public: + + MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C) + : Next(NULL) + , Name(N) + , Description(D) + , Ctor(C) + {} + + // Accessors + MachinePassRegistryNode *getNext() const { return Next; } + MachinePassRegistryNode **getNextAddress() { return &Next; } + const char *getName() const { return Name; } + const char *getDescription() const { return Description; } + MachinePassCtor getCtor() const { return Ctor; } + void setNext(MachinePassRegistryNode *N) { Next = N; } + +}; + + +//===----------------------------------------------------------------------===// +/// +/// MachinePassRegistry - Track the registration of machine passes. +/// +//===----------------------------------------------------------------------===// +class MachinePassRegistry { + +private: + + MachinePassRegistryNode *List; // List of registry nodes. + MachinePassCtor Default; // Default function pass creator. 
+ MachinePassRegistryListener* Listener;// Listener for list adds are removes. + +public: + + // NO CONSTRUCTOR - we don't want static constructor ordering to mess + // with the registry. + + // Accessors. + // + MachinePassRegistryNode *getList() { return List; } + MachinePassCtor getDefault() { return Default; } + void setDefault(MachinePassCtor C) { Default = C; } + void setListener(MachinePassRegistryListener *L) { Listener = L; } + + /// Add - Adds a function pass to the registration list. + /// + void Add(MachinePassRegistryNode *Node); + + /// Remove - Removes a function pass from the registration list. + /// + void Remove(MachinePassRegistryNode *Node); + +}; + + +//===----------------------------------------------------------------------===// +/// +/// RegisterPassParser class - Handle the addition of new machine passes. +/// +//===----------------------------------------------------------------------===// +template<class RegistryClass> +class RegisterPassParser : public MachinePassRegistryListener, + public cl::parser<typename RegistryClass::FunctionPassCtor> { +public: + RegisterPassParser() {} + ~RegisterPassParser() { RegistryClass::setListener(NULL); } + + void initialize(cl::Option &O) { + cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O); + + // Add existing passes to option. + for (RegistryClass *Node = RegistryClass::getList(); + Node; Node = Node->getNext()) { + addLiteralOption(Node->getName(), + (typename RegistryClass::FunctionPassCtor)Node->getCtor(), + Node->getDescription()); + } + + // Make sure we listen for list changes. + RegistryClass::setListener(this); + } + + // Implement the MachinePassRegistryListener callbacks. + // + virtual void NotifyAdd(const char *N, + MachinePassCtor C, + const char *D) { + this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D); + } + virtual void NotifyRemove(const char *N) { + this->removeLiteralOption(N); + } +}; + + +} // end namespace llvm + +#endif diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h new file mode 100644 index 0000000..02f9b7c --- /dev/null +++ b/include/llvm/CodeGen/MachineRegisterInfo.h @@ -0,0 +1,305 @@ +//===-- llvm/CodeGen/MachineRegisterInfo.h ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MachineRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H +#define LLVM_CODEGEN_MACHINEREGISTERINFO_H + +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/iterator.h" +#include <vector> + +namespace llvm { + +/// MachineRegisterInfo - Keep track of information for virtual and physical +/// registers, including vreg register classes, use/def chains for registers, +/// etc. +class MachineRegisterInfo { + /// VRegInfo - Information we keep for each virtual register. The entries in + /// this vector are actually converted to vreg numbers by adding the + /// TargetRegisterInfo::FirstVirtualRegister delta to their index. + /// + /// Each element in this list contains the register class of the vreg and the + /// start of the use/def list for the register. 
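A sketch, in the style of the in-tree scheduler and register-allocator registries, of how a hypothetical registry and its registration helper could be wired up with the classes above; all names here are invented for illustration.

#include "llvm/CodeGen/MachinePassRegistry.h"
using namespace llvm;

namespace {
  // The registry has no constructor, so a namespace-scope instance is simply
  // zero-initialized and safe to use from static registration objects.
  MachinePassRegistry WidgetRegistry;

  struct RegisterWidgetPass : public MachinePassRegistryNode {
    RegisterWidgetPass(const char *N, const char *D, MachinePassCtor C)
      : MachinePassRegistryNode(N, D, C) {
      WidgetRegistry.Add(this);           // announce the pass (and notify any listener)
    }
    ~RegisterWidgetPass() { WidgetRegistry.Remove(this); }
  };

  void *createDefaultWidgetPass() { return 0; }  // stand-in creator function

  RegisterWidgetPass DefaultWidget("default", "default widget pass",
                                   createDefaultWidgetPass);
}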
+ std::vector<std::pair<const TargetRegisterClass*, MachineOperand*> > VRegInfo; + + /// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to + /// virtual registers. For each target register class, it keeps a list of + /// virtual registers belonging to the class. + std::vector<std::vector<unsigned> > RegClass2VRegMap; + + /// PhysRegUseDefLists - This is an array of the head of the use/def list for + /// physical registers. + MachineOperand **PhysRegUseDefLists; + + /// UsedPhysRegs - This is a bit vector that is computed and set by the + /// register allocator, and must be kept up to date by passes that run after + /// register allocation (though most don't modify this). This is used + /// so that the code generator knows which callee save registers to save and + /// for other target specific uses. + BitVector UsedPhysRegs; + + /// LiveIns/LiveOuts - Keep track of the physical registers that are + /// livein/liveout of the function. Live in values are typically arguments in + /// registers, live out values are typically return values in registers. + /// LiveIn values are allowed to have virtual registers associated with them, + /// stored in the second element. + std::vector<std::pair<unsigned, unsigned> > LiveIns; + std::vector<unsigned> LiveOuts; + + MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT + void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT +public: + explicit MachineRegisterInfo(const TargetRegisterInfo &TRI); + ~MachineRegisterInfo(); + + //===--------------------------------------------------------------------===// + // Register Info + //===--------------------------------------------------------------------===// + + /// reg_begin/reg_end - Provide iteration support to walk over all definitions + /// and uses of a register within the MachineFunction that corresponds to this + /// MachineRegisterInfo object. + template<bool Uses, bool Defs> + class defusechain_iterator; + + /// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified + /// register. + typedef defusechain_iterator<true,true> reg_iterator; + reg_iterator reg_begin(unsigned RegNo) const { + return reg_iterator(getRegUseDefListHead(RegNo)); + } + static reg_iterator reg_end() { return reg_iterator(0); } + + /// reg_empty - Return true if there are no instructions using or defining the + /// specified register (it may be live-in). + bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); } + + /// def_iterator/def_begin/def_end - Walk all defs of the specified register. + typedef defusechain_iterator<false,true> def_iterator; + def_iterator def_begin(unsigned RegNo) const { + return def_iterator(getRegUseDefListHead(RegNo)); + } + static def_iterator def_end() { return def_iterator(0); } + + /// def_empty - Return true if there are no instructions defining the + /// specified register (it may be live-in). + bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); } + + /// use_iterator/use_begin/use_end - Walk all uses of the specified register. + typedef defusechain_iterator<true,false> use_iterator; + use_iterator use_begin(unsigned RegNo) const { + return use_iterator(getRegUseDefListHead(RegNo)); + } + static use_iterator use_end() { return use_iterator(0); } + + /// use_empty - Return true if there are no instructions using the specified + /// register. 
+ bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); } + + + /// replaceRegWith - Replace all instances of FromReg with ToReg in the + /// machine function. This is like llvm-level X->replaceAllUsesWith(Y), + /// except that it also changes any definitions of the register as well. + void replaceRegWith(unsigned FromReg, unsigned ToReg); + + /// getRegUseDefListHead - Return the head pointer for the register use/def + /// list for the specified virtual or physical register. + MachineOperand *&getRegUseDefListHead(unsigned RegNo) { + if (RegNo < TargetRegisterInfo::FirstVirtualRegister) + return PhysRegUseDefLists[RegNo]; + RegNo -= TargetRegisterInfo::FirstVirtualRegister; + return VRegInfo[RegNo].second; + } + + MachineOperand *getRegUseDefListHead(unsigned RegNo) const { + if (RegNo < TargetRegisterInfo::FirstVirtualRegister) + return PhysRegUseDefLists[RegNo]; + RegNo -= TargetRegisterInfo::FirstVirtualRegister; + return VRegInfo[RegNo].second; + } + + /// getVRegDef - Return the machine instr that defines the specified virtual + /// register or null if none is found. This assumes that the code is in SSA + /// form, so there should only be one definition. + MachineInstr *getVRegDef(unsigned Reg) const; + +#ifndef NDEBUG + void dumpUses(unsigned RegNo) const; +#endif + + //===--------------------------------------------------------------------===// + // Virtual Register Info + //===--------------------------------------------------------------------===// + + /// getRegClass - Return the register class of the specified virtual register. + /// + const TargetRegisterClass *getRegClass(unsigned Reg) const { + Reg -= TargetRegisterInfo::FirstVirtualRegister; + assert(Reg < VRegInfo.size() && "Invalid vreg!"); + return VRegInfo[Reg].first; + } + + /// setRegClass - Set the register class of the specified virtual register. + /// + void setRegClass(unsigned Reg, const TargetRegisterClass *RC); + + /// createVirtualRegister - Create and return a new virtual register in the + /// function with the specified register class. + /// + unsigned createVirtualRegister(const TargetRegisterClass *RegClass); + + /// getLastVirtReg - Return the highest currently assigned virtual register. + /// + unsigned getLastVirtReg() const { + return (unsigned)VRegInfo.size()+TargetRegisterInfo::FirstVirtualRegister-1; + } + + /// getRegClassVirtRegs - Return the list of virtual registers of the given + /// target register class. + std::vector<unsigned> &getRegClassVirtRegs(const TargetRegisterClass *RC) { + return RegClass2VRegMap[RC->getID()]; + } + + //===--------------------------------------------------------------------===// + // Physical Register Use Info + //===--------------------------------------------------------------------===// + + /// isPhysRegUsed - Return true if the specified register is used in this + /// function. This only works after register allocation. + bool isPhysRegUsed(unsigned Reg) const { return UsedPhysRegs[Reg]; } + + /// setPhysRegUsed - Mark the specified register used in this function. + /// This should only be called during and after register allocation. + void setPhysRegUsed(unsigned Reg) { UsedPhysRegs[Reg] = true; } + + /// setPhysRegUnused - Mark the specified register unused in this function. + /// This should only be called during and after register allocation. 
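A sketch tying these pieces together: creating a virtual register, finding its single (SSA-form) definition, and walking its use chain. RC is whatever register class the target requires for the value.

#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

unsigned makeAndInspect(MachineRegisterInfo &MRI, const TargetRegisterClass *RC) {
  unsigned VReg = MRI.createVirtualRegister(RC);

  // ... code that emits a def of VReg and some uses of it ...

  MachineInstr *DefMI = MRI.getVRegDef(VReg);   // null until a def has been emitted
  (void)DefMI;

  for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VReg),
         UE = MRI.use_end(); UI != UE; ++UI) {
    MachineInstr &UseMI = *UI;                  // instruction containing the use
    MachineOperand &MO = UI.getOperand();       // the using operand itself
    (void)UseMI; (void)MO;
  }
  return VReg;
}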
+ void setPhysRegUnused(unsigned Reg) { UsedPhysRegs[Reg] = false; } + + + //===--------------------------------------------------------------------===// + // LiveIn/LiveOut Management + //===--------------------------------------------------------------------===// + + /// addLiveIn/Out - Add the specified register as a live in/out. Note that it + /// is an error to add the same register to the same set more than once. + void addLiveIn(unsigned Reg, unsigned vreg = 0) { + LiveIns.push_back(std::make_pair(Reg, vreg)); + } + void addLiveOut(unsigned Reg) { LiveOuts.push_back(Reg); } + + // Iteration support for live in/out sets. These sets are kept in sorted + // order by their register number. + typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator + livein_iterator; + typedef std::vector<unsigned>::const_iterator liveout_iterator; + livein_iterator livein_begin() const { return LiveIns.begin(); } + livein_iterator livein_end() const { return LiveIns.end(); } + bool livein_empty() const { return LiveIns.empty(); } + liveout_iterator liveout_begin() const { return LiveOuts.begin(); } + liveout_iterator liveout_end() const { return LiveOuts.end(); } + bool liveout_empty() const { return LiveOuts.empty(); } + + bool isLiveIn(unsigned Reg) const { + for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I) + if (I->first == Reg || I->second == Reg) + return true; + return false; + } + +private: + void HandleVRegListReallocation(); + +public: + /// defusechain_iterator - This class provides iterator support for machine + /// operands in the function that use or define a specific register. If + /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it + /// returns defs. If neither are true then you are silly and it always + /// returns end(). + template<bool ReturnUses, bool ReturnDefs> + class defusechain_iterator + : public forward_iterator<MachineInstr, ptrdiff_t> { + MachineOperand *Op; + explicit defusechain_iterator(MachineOperand *op) : Op(op) { + // If the first node isn't one we're interested in, advance to one that + // we are interested in. + if (op) { + if ((!ReturnUses && op->isUse()) || + (!ReturnDefs && op->isDef())) + ++*this; + } + } + friend class MachineRegisterInfo; + public: + typedef forward_iterator<MachineInstr, ptrdiff_t>::reference reference; + typedef forward_iterator<MachineInstr, ptrdiff_t>::pointer pointer; + + defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {} + defusechain_iterator() : Op(0) {} + + bool operator==(const defusechain_iterator &x) const { + return Op == x.Op; + } + bool operator!=(const defusechain_iterator &x) const { + return !operator==(x); + } + + /// atEnd - return true if this iterator is equal to reg_end() on the value. + bool atEnd() const { return Op == 0; } + + // Iterator traversal: forward iteration only + defusechain_iterator &operator++() { // Preincrement + assert(Op && "Cannot increment end iterator!"); + Op = Op->getNextOperandForReg(); + + // If this is an operand we don't care about, skip it. + while (Op && ((!ReturnUses && Op->isUse()) || + (!ReturnDefs && Op->isDef()))) + Op = Op->getNextOperandForReg(); + + return *this; + } + defusechain_iterator operator++(int) { // Postincrement + defusechain_iterator tmp = *this; ++*this; return tmp; + } + + MachineOperand &getOperand() const { + assert(Op && "Cannot dereference end iterator!"); + return *Op; + } + + /// getOperandNo - Return the operand # of this MachineOperand in its + /// MachineInstr. 
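A sketch of the usual live-in pattern during argument lowering: the physical register that carries an incoming argument is paired with a fresh virtual register. PhysArgReg and RC are hypothetical, supplied by the target's calling-convention code.

#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

unsigned lowerIncomingArg(MachineRegisterInfo &MRI, unsigned PhysArgReg,
                          const TargetRegisterClass *RC) {
  unsigned VReg = MRI.createVirtualRegister(RC);
  if (!MRI.isLiveIn(PhysArgReg))
    MRI.addLiveIn(PhysArgReg, VReg);  // adding the same register twice is an error
  return VReg;
}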
+ unsigned getOperandNo() const { + assert(Op && "Cannot dereference end iterator!"); + return Op - &Op->getParent()->getOperand(0); + } + + // Retrieve a reference to the current operand. + MachineInstr &operator*() const { + assert(Op && "Cannot dereference end iterator!"); + return *Op->getParent(); + } + + MachineInstr *operator->() const { + assert(Op && "Cannot dereference end iterator!"); + return Op->getParent(); + } + }; + +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/MachineRelocation.h b/include/llvm/CodeGen/MachineRelocation.h new file mode 100644 index 0000000..c539781 --- /dev/null +++ b/include/llvm/CodeGen/MachineRelocation.h @@ -0,0 +1,339 @@ +//===-- llvm/CodeGen/MachineRelocation.h - Target Relocation ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MachineRelocation class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINERELOCATION_H +#define LLVM_CODEGEN_MACHINERELOCATION_H + +#include "llvm/Support/DataTypes.h" +#include <cassert> + +namespace llvm { +class GlobalValue; +class MachineBasicBlock; + +/// MachineRelocation - This represents a target-specific relocation value, +/// produced by the code emitter. This relocation is resolved after the has +/// been emitted, either to an object file or to memory, when the target of the +/// relocation can be resolved. +/// +/// A relocation is made up of the following logical portions: +/// 1. An offset in the machine code buffer, the location to modify. +/// 2. A target specific relocation type (a number from 0 to 63). +/// 3. A symbol being referenced, either as a GlobalValue* or as a string. +/// 4. An optional constant value to be added to the reference. +/// 5. A bit, CanRewrite, which indicates to the JIT that a function stub is +/// not needed for the relocation. +/// 6. An index into the GOT, if the target uses a GOT +/// +class MachineRelocation { + enum AddressType { + isResult, // Relocation has be transformed into its result pointer. + isGV, // The Target.GV field is valid. + isIndirectSym, // Relocation of an indirect symbol. + isBB, // Relocation of BB address. + isExtSym, // The Target.ExtSym field is valid. + isConstPool, // Relocation of constant pool address. + isJumpTable, // Relocation of jump table address. + isGOTIndex // The Target.GOTIndex field is valid. + }; + + /// Offset - This is the offset from the start of the code buffer of the + /// relocation to perform. + uintptr_t Offset; + + /// ConstantVal - A field that may be used by the target relocation type. + intptr_t ConstantVal; + + union { + void *Result; // If this has been resolved to a resolved pointer + GlobalValue *GV; // If this is a pointer to a GV or an indirect ref. + MachineBasicBlock *MBB; // If this is a pointer to a LLVM BB + const char *ExtSym; // If this is a pointer to a named symbol + unsigned Index; // Constant pool / jump table index + unsigned GOTIndex; // Index in the GOT of this symbol/global + } Target; + + unsigned TargetReloType : 6; // The target relocation ID + AddressType AddrType : 4; // The field of Target to use + bool NeedStub : 1; // True if this relocation requires a stub + bool GOTRelative : 1; // Should this relocation be relative to the GOT? 
+ bool TargetResolve : 1; // True if target should resolve the address + +public: + // Relocation types used in a generic implementation. Currently, relocation + // entries for all things use the generic VANILLA type until they are refined + // into target relocation types. + enum RelocationType { + VANILLA + }; + + /// MachineRelocation::getGV - Return a relocation entry for a GlobalValue. + /// + static MachineRelocation getGV(uintptr_t offset, unsigned RelocationType, + GlobalValue *GV, intptr_t cst = 0, + bool NeedStub = 0, + bool GOTrelative = 0) { + assert((RelocationType & ~63) == 0 && "Relocation type too large!"); + MachineRelocation Result; + Result.Offset = offset; + Result.ConstantVal = cst; + Result.TargetReloType = RelocationType; + Result.AddrType = isGV; + Result.NeedStub = NeedStub; + Result.GOTRelative = GOTrelative; + Result.TargetResolve = false; + Result.Target.GV = GV; + return Result; + } + + /// MachineRelocation::getIndirectSymbol - Return a relocation entry for an + /// indirect symbol. + static MachineRelocation getIndirectSymbol(uintptr_t offset, + unsigned RelocationType, + GlobalValue *GV, intptr_t cst = 0, + bool NeedStub = 0, + bool GOTrelative = 0) { + assert((RelocationType & ~63) == 0 && "Relocation type too large!"); + MachineRelocation Result; + Result.Offset = offset; + Result.ConstantVal = cst; + Result.TargetReloType = RelocationType; + Result.AddrType = isIndirectSym; + Result.NeedStub = NeedStub; + Result.GOTRelative = GOTrelative; + Result.TargetResolve = false; + Result.Target.GV = GV; + return Result; + } + + /// MachineRelocation::getBB - Return a relocation entry for a BB. + /// + static MachineRelocation getBB(uintptr_t offset,unsigned RelocationType, + MachineBasicBlock *MBB, intptr_t cst = 0) { + assert((RelocationType & ~63) == 0 && "Relocation type too large!"); + MachineRelocation Result; + Result.Offset = offset; + Result.ConstantVal = cst; + Result.TargetReloType = RelocationType; + Result.AddrType = isBB; + Result.NeedStub = false; + Result.GOTRelative = false; + Result.TargetResolve = false; + Result.Target.MBB = MBB; + return Result; + } + + /// MachineRelocation::getExtSym - Return a relocation entry for an external + /// symbol, like "free". + /// + static MachineRelocation getExtSym(uintptr_t offset, unsigned RelocationType, + const char *ES, intptr_t cst = 0, + bool GOTrelative = 0) { + assert((RelocationType & ~63) == 0 && "Relocation type too large!"); + MachineRelocation Result; + Result.Offset = offset; + Result.ConstantVal = cst; + Result.TargetReloType = RelocationType; + Result.AddrType = isExtSym; + Result.NeedStub = true; + Result.GOTRelative = GOTrelative; + Result.TargetResolve = false; + Result.Target.ExtSym = ES; + return Result; + } + + /// MachineRelocation::getConstPool - Return a relocation entry for a constant + /// pool entry. + /// + static MachineRelocation getConstPool(uintptr_t offset,unsigned RelocationType, + unsigned CPI, intptr_t cst = 0, + bool letTargetResolve = false) { + assert((RelocationType & ~63) == 0 && "Relocation type too large!"); + MachineRelocation Result; + Result.Offset = offset; + Result.ConstantVal = cst; + Result.TargetReloType = RelocationType; + Result.AddrType = isConstPool; + Result.NeedStub = false; + Result.GOTRelative = false; + Result.TargetResolve = letTargetResolve; + Result.Target.Index = CPI; + return Result; + } + + /// MachineRelocation::getJumpTable - Return a relocation entry for a jump + /// table entry. 
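+  /// Emitter-side usage, sketched under assumptions: MCE is taken to be a
+  /// MachineCodeEmitter, and TargetReloc and JTI are assumed names for a
+  /// target relocation id and jump table index; none of them is defined in
+  /// this header.
+  ///
+  ///   MCE.addRelocation(MachineRelocation::getJumpTable(
+  ///       MCE.getCurrentPCOffset(), TargetReloc, JTI));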
+ /// + static MachineRelocation getJumpTable(uintptr_t offset,unsigned RelocationType, + unsigned JTI, intptr_t cst = 0, + bool letTargetResolve = false) { + assert((RelocationType & ~63) == 0 && "Relocation type too large!"); + MachineRelocation Result; + Result.Offset = offset; + Result.ConstantVal = cst; + Result.TargetReloType = RelocationType; + Result.AddrType = isJumpTable; + Result.NeedStub = false; + Result.GOTRelative = false; + Result.TargetResolve = letTargetResolve; + Result.Target.Index = JTI; + return Result; + } + + /// getMachineCodeOffset - Return the offset into the code buffer that the + /// relocation should be performed. + intptr_t getMachineCodeOffset() const { + return Offset; + } + + /// getRelocationType - Return the target-specific relocation ID for this + /// relocation. + unsigned getRelocationType() const { + return TargetReloType; + } + + /// getConstantVal - Get the constant value associated with this relocation. + /// This is often an offset from the symbol. + /// + intptr_t getConstantVal() const { + return ConstantVal; + } + + /// setConstantVal - Set the constant value associated with this relocation. + /// This is often an offset from the symbol. + /// + void setConstantVal(intptr_t val) { + ConstantVal = val; + } + + /// isGlobalValue - Return true if this relocation is a GlobalValue, as + /// opposed to a constant string. + bool isGlobalValue() const { + return AddrType == isGV; + } + + /// isIndirectSymbol - Return true if this relocation is the address an + /// indirect symbol + bool isIndirectSymbol() const { + return AddrType == isIndirectSym; + } + + /// isBasicBlock - Return true if this relocation is a basic block reference. + /// + bool isBasicBlock() const { + return AddrType == isBB; + } + + /// isExternalSymbol - Return true if this is a constant string. + /// + bool isExternalSymbol() const { + return AddrType == isExtSym; + } + + /// isConstantPoolIndex - Return true if this is a constant pool reference. + /// + bool isConstantPoolIndex() const { + return AddrType == isConstPool; + } + + /// isJumpTableIndex - Return true if this is a jump table reference. + /// + bool isJumpTableIndex() const { + return AddrType == isJumpTable; + } + + /// isGOTRelative - Return true the target wants the index into the GOT of + /// the symbol rather than the address of the symbol. + bool isGOTRelative() const { + return GOTRelative; + } + + /// doesntNeedStub - This function returns true if the JIT for this target + /// target is capable of directly handling the relocated GlobalValue reference + /// without using either a stub function or issuing an extra load to get the + /// GV address. + bool doesntNeedStub() const { + return !NeedStub; + } + + /// letTargetResolve - Return true if the target JITInfo is usually + /// responsible for resolving the address of this relocation. + bool letTargetResolve() const { + return TargetResolve; + } + + /// getGlobalValue - If this is a global value reference, return the + /// referenced global. + GlobalValue *getGlobalValue() const { + assert((isGlobalValue() || isIndirectSymbol()) && + "This is not a global value reference!"); + return Target.GV; + } + + MachineBasicBlock *getBasicBlock() const { + assert(isBasicBlock() && "This is not a basic block reference!"); + return Target.MBB; + } + + /// getString - If this is a string value, return the string reference. 
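+  /// At resolution time a JIT might, for example, do something along these
+  /// lines (MR is a MachineRelocation being processed and lookupSymbol is an
+  /// assumed helper, not an LLVM API):
+  ///
+  ///   if (MR.isExternalSymbol())
+  ///     MR.setResultPointer(lookupSymbol(MR.getExternalSymbol()));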
+ /// + const char *getExternalSymbol() const { + assert(isExternalSymbol() && "This is not an external symbol reference!"); + return Target.ExtSym; + } + + /// getConstantPoolIndex - If this is a const pool reference, return + /// the index into the constant pool. + unsigned getConstantPoolIndex() const { + assert(isConstantPoolIndex() && "This is not a constant pool reference!"); + return Target.Index; + } + + /// getJumpTableIndex - If this is a jump table reference, return + /// the index into the jump table. + unsigned getJumpTableIndex() const { + assert(isJumpTableIndex() && "This is not a jump table reference!"); + return Target.Index; + } + + /// getResultPointer - Once this has been resolved to point to an actual + /// address, this returns the pointer. + void *getResultPointer() const { + assert(AddrType == isResult && "Result pointer isn't set yet!"); + return Target.Result; + } + + /// setResultPointer - Set the result to the specified pointer value. + /// + void setResultPointer(void *Ptr) { + Target.Result = Ptr; + AddrType = isResult; + } + + /// setGOTIndex - Set the GOT index to a specific value. + void setGOTIndex(unsigned idx) { + AddrType = isGOTIndex; + Target.GOTIndex = idx; + } + + /// getGOTIndex - Once this has been resolved to an entry in the GOT, + /// this returns that index. The index is from the lowest address entry + /// in the GOT. + unsigned getGOTIndex() const { + assert(AddrType == isGOTIndex); + return Target.GOTIndex; + } +}; +} + +#endif diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h new file mode 100644 index 0000000..7f1c16f --- /dev/null +++ b/include/llvm/CodeGen/Passes.h @@ -0,0 +1,212 @@ +//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines interfaces to access the target independent code generation +// passes provided by the LLVM backend. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_PASSES_H +#define LLVM_CODEGEN_PASSES_H + +#include <iosfwd> +#include <string> + +namespace llvm { + + class FunctionPass; + class PassInfo; + class TargetMachine; + class TargetLowering; + class RegisterCoalescer; + + /// createUnreachableBlockEliminationPass - The LLVM code generator does not + /// work well with unreachable basic blocks (what live ranges make sense for a + /// block that cannot be reached?). As such, a code generator should either + /// not instruction select unreachable blocks, or it can run this pass as it's + /// last LLVM modifying pass to clean up blocks that are not reachable from + /// the entry block. + FunctionPass *createUnreachableBlockEliminationPass(); + + /// MachineFunctionPrinter pass - This pass prints out the machine function to + /// standard error, as a debugging tool. + FunctionPass *createMachineFunctionPrinterPass(std::ostream *OS, + const std::string &Banner =""); + + /// MachineLoopInfo pass - This pass is a loop analysis pass. + /// + extern const PassInfo *const MachineLoopInfoID; + + /// MachineDominators pass - This pass is a machine dominators analysis pass. 
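+  /// A machine pass that wants dominators (or any of the IDs below) requests
+  /// them in the usual way; a minimal sketch of the pass's declaration:
+  ///
+  ///   virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+  ///     AU.addRequiredID(MachineDominatorsID);
+  ///     MachineFunctionPass::getAnalysisUsage(AU);
+  ///   }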
+ /// + extern const PassInfo *const MachineDominatorsID; + + /// PHIElimination pass - This pass eliminates machine instruction PHI nodes + /// by inserting copy instructions. This destroys SSA information, but is the + /// desired input for some register allocators. This pass is "required" by + /// these register allocator like this: AU.addRequiredID(PHIEliminationID); + /// + extern const PassInfo *const PHIEliminationID; + + /// StrongPHIElimination pass - This pass eliminates machine instruction PHI + /// nodes by inserting copy instructions. This destroys SSA information, but + /// is the desired input for some register allocators. This pass is + /// "required" by these register allocator like this: + /// AU.addRequiredID(PHIEliminationID); + /// This pass is still in development + extern const PassInfo *const StrongPHIEliminationID; + + extern const PassInfo *const PreAllocSplittingID; + + /// SimpleRegisterCoalescing pass. Aggressively coalesces every register + /// copy it can. + /// + extern const PassInfo *const SimpleRegisterCoalescingID; + + /// TwoAddressInstruction pass - This pass reduces two-address instructions to + /// use two operands. This destroys SSA information but it is desired by + /// register allocators. + extern const PassInfo *const TwoAddressInstructionPassID; + + /// UnreachableMachineBlockElimination pass - This pass removes unreachable + /// machine basic blocks. + extern const PassInfo *const UnreachableMachineBlockElimID; + + /// DeadMachineInstructionElim pass - This pass removes dead machine + /// instructions. + /// + FunctionPass *createDeadMachineInstructionElimPass(); + + /// Creates a register allocator as the user specified on the command line. + /// + FunctionPass *createRegisterAllocator(); + + /// SimpleRegisterAllocation Pass - This pass converts the input machine code + /// from SSA form to use explicit registers by spilling every register. Wow, + /// great policy huh? + /// + FunctionPass *createSimpleRegisterAllocator(); + + /// LocalRegisterAllocation Pass - This pass register allocates the input code + /// a basic block at a time, yielding code better than the simple register + /// allocator, but not as good as a global allocator. + /// + FunctionPass *createLocalRegisterAllocator(); + + /// BigBlockRegisterAllocation Pass - The BigBlock register allocator + /// munches single basic blocks at a time, like the local register + /// allocator. While the BigBlock allocator is a little slower, and uses + /// somewhat more memory than the local register allocator, it tends to + /// yield the best allocations (of any of the allocators) for blocks that + /// have hundreds or thousands of instructions in sequence. + /// + FunctionPass *createBigBlockRegisterAllocator(); + + /// LinearScanRegisterAllocation Pass - This pass implements the linear scan + /// register allocation algorithm, a global register allocator. + /// + FunctionPass *createLinearScanRegisterAllocator(); + + /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean + /// Quadratic Prograaming (PBQP) based register allocator. + /// + FunctionPass *createPBQPRegisterAllocator(); + + /// SimpleRegisterCoalescing Pass - Coalesce all copies possible. Can run + /// independently of the register allocator. + /// + RegisterCoalescer *createSimpleRegisterCoalescer(); + + /// PrologEpilogCodeInserter Pass - This pass inserts prolog and epilog code, + /// and eliminates abstract frame references. 
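+  /// These creator functions are meant to be handed straight to a pass
+  /// manager when a target assembles its code generation pipeline; an
+  /// illustrative fragment only (PM is an assumed PassManager set up by the
+  /// caller):
+  ///
+  ///   PM.add(createRegisterAllocator());
+  ///   PM.add(createPrologEpilogCodeInserter());
+  ///   PM.add(createBranchFoldingPass(true));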
+ /// + FunctionPass *createPrologEpilogCodeInserter(); + + /// LowerSubregs Pass - This pass lowers subregs to register-register copies + /// which yields suboptimal, but correct code if the register allocator + /// cannot coalesce all subreg operations during allocation. + /// + FunctionPass *createLowerSubregsPass(); + + /// createPostRAScheduler - under development. + FunctionPass *createPostRAScheduler(); + + /// BranchFolding Pass - This pass performs machine code CFG based + /// optimizations to delete branches to branches, eliminate branches to + /// successor blocks (creating fall throughs), and eliminating branches over + /// branches. + FunctionPass *createBranchFoldingPass(bool DefaultEnableTailMerge); + + /// IfConverter Pass - This pass performs machine code if conversion. + FunctionPass *createIfConverterPass(); + + /// Code Placement Pass - This pass optimize code placement and aligns loop + /// headers to target specific alignment boundary. + FunctionPass *createCodePlacementOptPass(); + + /// DebugLabelFoldingPass - This pass prunes out redundant debug labels. This + /// allows a debug emitter to determine if the range of two labels is empty, + /// by seeing if the labels map to the same reduced label. + FunctionPass *createDebugLabelFoldingPass(); + + /// MachineCodeDeletion Pass - This pass deletes all of the machine code for + /// the current function, which should happen after the function has been + /// emitted to a .s file or to memory. + FunctionPass *createMachineCodeDeleter(); + + /// getRegisterAllocator - This creates an instance of the register allocator + /// for the Sparc. + FunctionPass *getRegisterAllocator(TargetMachine &T); + + /// IntrinsicLowering Pass - Performs target-independent LLVM IR + /// transformations for highly portable strategies. + FunctionPass *createGCLoweringPass(); + + /// MachineCodeAnalysis Pass - Target-independent pass to mark safe points in + /// machine code. Must be added very late during code generation, just prior + /// to output, and importantly after all CFG transformations (such as branch + /// folding). + FunctionPass *createGCMachineCodeAnalysisPass(); + + /// Deleter Pass - Releases GC metadata. + /// + FunctionPass *createGCInfoDeleter(); + + /// Creates a pass to print GC metadata. + /// + FunctionPass *createGCInfoPrinter(std::ostream &OS); + + /// createMachineLICMPass - This pass performs LICM on machine instructions. + /// + FunctionPass *createMachineLICMPass(); + + /// createMachineSinkingPass - This pass performs sinking on machine + /// instructions. + FunctionPass *createMachineSinkingPass(); + + /// createStackSlotColoringPass - This pass performs stack slot coloring. + FunctionPass *createStackSlotColoringPass(bool); + + /// createStackProtectorPass - This pass adds stack protectors to functions. + FunctionPass *createStackProtectorPass(const TargetLowering *tli); + + /// createMachineVerifierPass - This pass verifies cenerated machine code + /// instructions for correctness. + /// + /// @param allowPhysDoubleDefs ignore double definitions of + /// registers. Useful before LiveVariables has run. + FunctionPass *createMachineVerifierPass(bool allowDoubleDefs); + + /// createDwarfEHPass - This pass mulches exception handling code into a form + /// adapted to code generation. Required if using dwarf exception handling. 
+ FunctionPass *createDwarfEHPass(const TargetLowering *tli, bool fast); + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/PseudoSourceValue.h b/include/llvm/CodeGen/PseudoSourceValue.h new file mode 100644 index 0000000..3ad2502 --- /dev/null +++ b/include/llvm/CodeGen/PseudoSourceValue.h @@ -0,0 +1,71 @@ +//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the PseudoSourceValue class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H +#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H + +#include "llvm/Value.h" + +namespace llvm { + class MachineFrameInfo; + class raw_ostream; + + /// PseudoSourceValue - Special value supplied for machine level alias + /// analysis. It indicates that the a memory access references the functions + /// stack frame (e.g., a spill slot), below the stack frame (e.g., argument + /// space), or constant pool. + class PseudoSourceValue : public Value { + public: + PseudoSourceValue(); + + /// dump - Support for debugging, callable in GDB: V->dump() + // + virtual void dump() const; + + /// print - Implement operator<< on PseudoSourceValue. + /// + virtual void print(raw_ostream &OS) const; + + /// isConstant - Test whether this PseudoSourceValue has a constant value. + /// + virtual bool isConstant(const MachineFrameInfo *) const; + + /// classof - Methods for support type inquiry through isa, cast, and + /// dyn_cast: + /// + static inline bool classof(const PseudoSourceValue *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == PseudoSourceValueVal; + } + + /// A pseudo source value referencing a fixed stack frame entry, + /// e.g., a spill slot. + static const PseudoSourceValue *getFixedStack(int FI); + + /// A source value referencing the area below the stack frame of a function, + /// e.g., the argument space. + static const PseudoSourceValue *getStack(); + + /// A source value referencing the global offset table (or something the + /// like). + static const PseudoSourceValue *getGOT(); + + /// A SV referencing the constant pool + static const PseudoSourceValue *getConstantPool(); + + /// A SV referencing the jump table + static const PseudoSourceValue *getJumpTable(); + }; +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/RegAllocRegistry.h b/include/llvm/CodeGen/RegAllocRegistry.h new file mode 100644 index 0000000..a08e42a --- /dev/null +++ b/include/llvm/CodeGen/RegAllocRegistry.h @@ -0,0 +1,64 @@ +//===-- llvm/CodeGen/RegAllocRegistry.h -------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation for register allocator function +// pass registry (RegisterRegAlloc). 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGENREGALLOCREGISTRY_H +#define LLVM_CODEGENREGALLOCREGISTRY_H + +#include "llvm/CodeGen/MachinePassRegistry.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// RegisterRegAlloc class - Track the registration of register allocators. +/// +//===----------------------------------------------------------------------===// +class RegisterRegAlloc : public MachinePassRegistryNode { + +public: + + typedef FunctionPass *(*FunctionPassCtor)(); + + static MachinePassRegistry Registry; + + RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C) + : MachinePassRegistryNode(N, D, (MachinePassCtor)C) + { Registry.Add(this); } + ~RegisterRegAlloc() { Registry.Remove(this); } + + + // Accessors. + // + RegisterRegAlloc *getNext() const { + return (RegisterRegAlloc *)MachinePassRegistryNode::getNext(); + } + static RegisterRegAlloc *getList() { + return (RegisterRegAlloc *)Registry.getList(); + } + static FunctionPassCtor getDefault() { + return (FunctionPassCtor)Registry.getDefault(); + } + static void setDefault(FunctionPassCtor C) { + Registry.setDefault((MachinePassCtor)C); + } + static void setListener(MachinePassRegistryListener *L) { + Registry.setListener(L); + } + +}; + +} // end namespace llvm + + +#endif diff --git a/include/llvm/CodeGen/RegisterCoalescer.h b/include/llvm/CodeGen/RegisterCoalescer.h new file mode 100644 index 0000000..79dd9db --- /dev/null +++ b/include/llvm/CodeGen/RegisterCoalescer.h @@ -0,0 +1,154 @@ +//===-- RegisterCoalescer.h - Register Coalescing Interface ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the abstract interface for register coalescers, +// allowing them to interact with and query register allocators. +// +//===----------------------------------------------------------------------===// + +#include "llvm/System/IncludeFile.h" +#include "llvm/CodeGen/LiveInterval.h" +#include "llvm/ADT/SmallPtrSet.h" + +#ifndef LLVM_CODEGEN_REGISTER_COALESCER_H +#define LLVM_CODEGEN_REGISTER_COALESCER_H + +namespace llvm { + + class MachineFunction; + class RegallocQuery; + class AnalysisUsage; + class MachineInstr; + + /// An abstract interface for register coalescers. Coalescers must + /// implement this interface to be part of the coalescer analysis + /// group. + class RegisterCoalescer { + public: + static char ID; // Class identification, replacement for typeinfo + RegisterCoalescer() {} + virtual ~RegisterCoalescer(); // We want to be subclassed + + /// Run the coalescer on this function, providing interference + /// data to query. Return whether we removed any copies. + virtual bool coalesceFunction(MachineFunction &mf, + RegallocQuery &ifd) = 0; + + /// Reset state. Can be used to allow a coalescer run by + /// PassManager to be run again by the register allocator. + virtual void reset(MachineFunction &mf) {}; + + /// Register allocators must call this from their own + /// getAnalysisUsage to cover the case where the coalescer is not + /// a Pass in the proper sense and isn't managed by PassManager. 
+ /// PassManager needs to know which analyses to make available and + /// which to invalidate when running the register allocator or any + /// pass that might call coalescing. The long-term solution is to + /// allow hierarchies of PassManagers. + virtual void getAnalysisUsage(AnalysisUsage &AU) const {}; + }; + + /// An abstract interface for register allocators to interact with + /// coalescers + /// + /// Example: + /// + /// This is simply an example of how to use the RegallocQuery + /// interface. It is not meant to be used in production. + /// + /// class LinearScanRegallocQuery : public RegallocQuery { + /// private: + /// const LiveIntervals \&li; + /// + /// public: + /// LinearScanRegallocQuery(LiveIntervals &intervals) + /// : li(intervals) {}; + /// + /// /// This is pretty slow and conservative, but since linear scan + /// /// allocation doesn't pre-compute interference information it's + /// /// the best we can do. Coalescers are always free to ignore this + /// /// and implement their own discovery strategy. See + /// /// SimpleRegisterCoalescing for an example. + /// void getInterferences(IntervalSet &interferences, + /// const LiveInterval &a) const { + /// for(LiveIntervals::const_iterator iv = li.begin(), + /// ivend = li.end(); + /// iv != ivend; + /// ++iv) { + /// if (interfere(a, iv->second)) { + /// interferences.insert(&iv->second); + /// } + /// } + /// }; + /// + /// /// This is *really* slow and stupid. See above. + /// int getNumberOfInterferences(const LiveInterval &a) const { + /// IntervalSet intervals; + /// getInterferences(intervals, a); + /// return intervals.size(); + /// }; + /// }; + /// + /// In the allocator: + /// + /// RegisterCoalescer &coalescer = getAnalysis<RegisterCoalescer>(); + /// + /// // We don't reset the coalescer so if it's already been run this + /// // takes almost no time. + /// LinearScanRegallocQuery ifd(*li_); + /// coalescer.coalesceFunction(fn, ifd); + /// + class RegallocQuery { + public: + typedef SmallPtrSet<const LiveInterval *, 8> IntervalSet; + + virtual ~RegallocQuery() {}; + + /// Return whether two live ranges interfere. + virtual bool interfere(const LiveInterval &a, + const LiveInterval &b) const { + // A naive test + return a.overlaps(b); + }; + + /// Return the set of intervals that interfere with this one. + virtual void getInterferences(IntervalSet &interferences, + const LiveInterval &a) const = 0; + + /// This can often be cheaper than actually returning the + /// interferences. + virtual int getNumberOfInterferences(const LiveInterval &a) const = 0; + + /// Make any data structure updates necessary to reflect + /// coalescing or other modifications. + virtual void updateDataForMerge(const LiveInterval &a, + const LiveInterval &b, + const MachineInstr ©) {}; + + /// Allow the register allocator to communicate when it doesn't + /// want a copy coalesced. This may be due to assumptions made by + /// the allocator about various invariants and so this question is + /// a matter of legality, not performance. Performance decisions + /// about which copies to coalesce should be made by the + /// coalescer. + virtual bool isLegalToCoalesce(const MachineInstr &inst) const { + return true; + } + }; +} + +// Because of the way .a files work, we must force the SimpleRC +// implementation to be pulled in if the RegisterCoalescing header is +// included. Otherwise we run the risk of RegisterCoalescing being +// used, but the default implementation not being linked into the tool +// that uses it. 
+FORCE_DEFINING_FILE_TO_BE_LINKED(RegisterCoalescer) +FORCE_DEFINING_FILE_TO_BE_LINKED(SimpleRegisterCoalescing) + +#endif diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h new file mode 100644 index 0000000..a4ed012 --- /dev/null +++ b/include/llvm/CodeGen/RegisterScavenging.h @@ -0,0 +1,178 @@ +//===-- RegisterScavenging.h - Machine register scavenging ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the machine register scavenger class. It can provide +// information such as unused register at any point in a machine basic block. +// It also provides a mechanism to make registers availbale by evicting them +// to spill slots. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_REGISTER_SCAVENGING_H +#define LLVM_CODEGEN_REGISTER_SCAVENGING_H + +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + +class MachineRegisterInfo; +class TargetRegisterInfo; +class TargetInstrInfo; +class TargetRegisterClass; + +class RegScavenger { + const TargetRegisterInfo *TRI; + const TargetInstrInfo *TII; + MachineRegisterInfo* MRI; + MachineBasicBlock *MBB; + MachineBasicBlock::iterator MBBI; + unsigned NumPhysRegs; + + /// Tracking - True if RegScavenger is currently tracking the liveness of + /// registers. + bool Tracking; + + /// ScavengingFrameIndex - Special spill slot used for scavenging a register + /// post register allocation. + int ScavengingFrameIndex; + + /// ScavengedReg - If none zero, the specific register is currently being + /// scavenged. That is, it is spilled to the special scavenging stack slot. + unsigned ScavengedReg; + + /// ScavengedRC - Register class of the scavenged register. + /// + const TargetRegisterClass *ScavengedRC; + + /// ScavengeRestore - Instruction that restores the scavenged register from + /// stack. + const MachineInstr *ScavengeRestore; + + /// CalleeSavedrRegs - A bitvector of callee saved registers for the target. + /// + BitVector CalleeSavedRegs; + + /// ReservedRegs - A bitvector of reserved registers. + /// + BitVector ReservedRegs; + + /// RegsAvailable - The current state of all the physical registers immediately + /// before MBBI. One bit per physical register. If bit is set that means it's + /// available, unset means the register is currently being used. + BitVector RegsAvailable; + + /// ImplicitDefed - If bit is set that means the register is defined by an + /// implicit_def instructions. That means it can be clobbered at will. + BitVector ImplicitDefed; + + /// CurrDist - Distance from MBB entry to the current instruction MBBI. + /// + unsigned CurrDist; + + /// DistanceMap - Keep track the distance of a MI from the start of the + /// current basic block. + DenseMap<MachineInstr*, unsigned> DistanceMap; + +public: + RegScavenger() + : MBB(NULL), NumPhysRegs(0), Tracking(false), + ScavengingFrameIndex(-1), ScavengedReg(0), ScavengedRC(NULL) {} + + /// enterBasicBlock - Start tracking liveness from the begin of the specific + /// basic block. + void enterBasicBlock(MachineBasicBlock *mbb); + + /// forward / backward - Move the internal MBB iterator and update register + /// states. 
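+  /// A frame-index elimination pass, for example, typically steps the
+  /// scavenger in lock step with its own iterator; a hedged sketch (RS and
+  /// MBB are assumed to be set up by the caller):
+  ///
+  ///   RS.enterBasicBlock(MBB);
+  ///   for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
+  ///        I != E; ++I) {
+  ///     RS.forward(I);  // liveness now reflects everything before I
+  ///     // ... rewrite I, possibly calling RS.scavengeRegister(...) ...
+  ///   }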
+  void forward();
+  void backward();
+
+  /// forward / backward - Move the internal MBB iterator and update register
+  /// states until it has processed the specified iterator.
+  void forward(MachineBasicBlock::iterator I) {
+    if (!Tracking && MBB->begin() != I) forward();
+    while (MBBI != I) forward();
+  }
+  void backward(MachineBasicBlock::iterator I) {
+    while (MBBI != I) backward();
+  }
+
+  /// skipTo - Move the internal MBB iterator but do not update register states.
+  ///
+  void skipTo(MachineBasicBlock::iterator I) { MBBI = I; }
+
+  /// isReserved - Returns true if a register is reserved. It is never "unused".
+  bool isReserved(unsigned Reg) const { return ReservedRegs[Reg]; }
+
+  /// isUsed / isUnused - Test if a register is currently being used.
+  ///
+  bool isUsed(unsigned Reg) const   { return !RegsAvailable[Reg]; }
+  bool isUnused(unsigned Reg) const { return RegsAvailable[Reg]; }
+
+  bool isImplicitlyDefined(unsigned Reg) const { return ImplicitDefed[Reg]; }
+
+  /// getRegsUsed - Return, in the used bitvector, all registers currently in
+  /// use.
+  void getRegsUsed(BitVector &used, bool includeReserved);
+
+  /// setUsed / setUnused - Mark the state of one or a number of registers.
+  ///
+  void setUsed(unsigned Reg, bool ImpDef = false);
+  void setUsed(BitVector &Regs, bool ImpDef = false) {
+    RegsAvailable &= ~Regs;
+    if (ImpDef)
+      ImplicitDefed |= Regs;
+    else
+      ImplicitDefed &= ~Regs;
+  }
+  void setUnused(unsigned Reg, const MachineInstr *MI);
+  void setUnused(BitVector &Regs) {
+    RegsAvailable |= Regs;
+    ImplicitDefed &= ~Regs;
+  }
+
+  /// FindUnusedReg - Find an unused register of the specified register class
+  /// from the specified set of registers. Returns 0 if none is found.
+  unsigned FindUnusedReg(const TargetRegisterClass *RegClass,
+                         const BitVector &Candidates) const;
+
+  /// FindUnusedReg - Find an unused register of the specified register class.
+  /// Exclude callee saved registers if directed. Returns 0 if none is found.
+  unsigned FindUnusedReg(const TargetRegisterClass *RegClass,
+                         bool ExCalleeSaved = false) const;
+
+  /// setScavengingFrameIndex / getScavengingFrameIndex - accessor and setter of
+  /// ScavengingFrameIndex.
+  void setScavengingFrameIndex(int FI) { ScavengingFrameIndex = FI; }
+  int getScavengingFrameIndex() const { return ScavengingFrameIndex; }
+
+  /// scavengeRegister - Make a register of the specified register class
+  /// available and do the appropriate bookkeeping. SPAdj is the stack
+  /// adjustment due to the call frame; it is passed along to
+  /// eliminateFrameIndex(). Returns the scavenged register.
+  unsigned scavengeRegister(const TargetRegisterClass *RegClass,
+                            MachineBasicBlock::iterator I, int SPAdj);
+  unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
+    return scavengeRegister(RegClass, MBBI, SPAdj);
+  }
+
+private:
+  /// restoreScavengedReg - Restore the scavenged register by loading it back
+  /// from the emergency spill slot. Mark it used.
+ void restoreScavengedReg(); + + MachineInstr *findFirstUse(MachineBasicBlock *MBB, + MachineBasicBlock::iterator I, unsigned Reg, + unsigned &Dist); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h new file mode 100644 index 0000000..dd76fcc --- /dev/null +++ b/include/llvm/CodeGen/RuntimeLibcalls.h @@ -0,0 +1,255 @@ +//===-- CodeGen/RuntimeLibcall.h - Runtime Library Calls --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the enum representing the list of runtime library calls +// the backend may emit during code generation, and also some helper functions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H +#define LLVM_CODEGEN_RUNTIMELIBCALLS_H + +#include "llvm/CodeGen/ValueTypes.h" + +namespace llvm { +namespace RTLIB { + /// RTLIB::Libcall enum - This enum defines all of the runtime library calls + /// the backend can emit. The various long double types cannot be merged, + /// because 80-bit library functions use "xf" and 128-bit use "tf". + /// + /// When adding PPCF128 functions here, note that their names generally need + /// to be overridden for Darwin with the xxx$LDBL128 form. See + /// PPCISelLowering.cpp. + /// + enum Libcall { + // Integer + SHL_I16, + SHL_I32, + SHL_I64, + SHL_I128, + SRL_I16, + SRL_I32, + SRL_I64, + SRL_I128, + SRA_I16, + SRA_I32, + SRA_I64, + SRA_I128, + MUL_I16, + MUL_I32, + MUL_I64, + MUL_I128, + SDIV_I16, + SDIV_I32, + SDIV_I64, + SDIV_I128, + UDIV_I16, + UDIV_I32, + UDIV_I64, + UDIV_I128, + SREM_I16, + SREM_I32, + SREM_I64, + SREM_I128, + UREM_I16, + UREM_I32, + UREM_I64, + UREM_I128, + NEG_I32, + NEG_I64, + + // FLOATING POINT + ADD_F32, + ADD_F64, + ADD_F80, + ADD_PPCF128, + SUB_F32, + SUB_F64, + SUB_F80, + SUB_PPCF128, + MUL_F32, + MUL_F64, + MUL_F80, + MUL_PPCF128, + DIV_F32, + DIV_F64, + DIV_F80, + DIV_PPCF128, + REM_F32, + REM_F64, + REM_F80, + REM_PPCF128, + POWI_F32, + POWI_F64, + POWI_F80, + POWI_PPCF128, + SQRT_F32, + SQRT_F64, + SQRT_F80, + SQRT_PPCF128, + LOG_F32, + LOG_F64, + LOG_F80, + LOG_PPCF128, + LOG2_F32, + LOG2_F64, + LOG2_F80, + LOG2_PPCF128, + LOG10_F32, + LOG10_F64, + LOG10_F80, + LOG10_PPCF128, + EXP_F32, + EXP_F64, + EXP_F80, + EXP_PPCF128, + EXP2_F32, + EXP2_F64, + EXP2_F80, + EXP2_PPCF128, + SIN_F32, + SIN_F64, + SIN_F80, + SIN_PPCF128, + COS_F32, + COS_F64, + COS_F80, + COS_PPCF128, + POW_F32, + POW_F64, + POW_F80, + POW_PPCF128, + CEIL_F32, + CEIL_F64, + CEIL_F80, + CEIL_PPCF128, + TRUNC_F32, + TRUNC_F64, + TRUNC_F80, + TRUNC_PPCF128, + RINT_F32, + RINT_F64, + RINT_F80, + RINT_PPCF128, + NEARBYINT_F32, + NEARBYINT_F64, + NEARBYINT_F80, + NEARBYINT_PPCF128, + FLOOR_F32, + FLOOR_F64, + FLOOR_F80, + FLOOR_PPCF128, + + // CONVERSION + FPEXT_F32_F64, + FPROUND_F64_F32, + FPROUND_F80_F32, + FPROUND_PPCF128_F32, + FPROUND_F80_F64, + FPROUND_PPCF128_F64, + FPTOSINT_F32_I32, + FPTOSINT_F32_I64, + FPTOSINT_F32_I128, + FPTOSINT_F64_I32, + FPTOSINT_F64_I64, + FPTOSINT_F64_I128, + FPTOSINT_F80_I32, + FPTOSINT_F80_I64, + FPTOSINT_F80_I128, + FPTOSINT_PPCF128_I32, + FPTOSINT_PPCF128_I64, + FPTOSINT_PPCF128_I128, + FPTOUINT_F32_I32, + FPTOUINT_F32_I64, + FPTOUINT_F32_I128, + FPTOUINT_F64_I32, + FPTOUINT_F64_I64, + FPTOUINT_F64_I128, + 
FPTOUINT_F80_I32, + FPTOUINT_F80_I64, + FPTOUINT_F80_I128, + FPTOUINT_PPCF128_I32, + FPTOUINT_PPCF128_I64, + FPTOUINT_PPCF128_I128, + SINTTOFP_I32_F32, + SINTTOFP_I32_F64, + SINTTOFP_I32_F80, + SINTTOFP_I32_PPCF128, + SINTTOFP_I64_F32, + SINTTOFP_I64_F64, + SINTTOFP_I64_F80, + SINTTOFP_I64_PPCF128, + SINTTOFP_I128_F32, + SINTTOFP_I128_F64, + SINTTOFP_I128_F80, + SINTTOFP_I128_PPCF128, + UINTTOFP_I32_F32, + UINTTOFP_I32_F64, + UINTTOFP_I32_F80, + UINTTOFP_I32_PPCF128, + UINTTOFP_I64_F32, + UINTTOFP_I64_F64, + UINTTOFP_I64_F80, + UINTTOFP_I64_PPCF128, + UINTTOFP_I128_F32, + UINTTOFP_I128_F64, + UINTTOFP_I128_F80, + UINTTOFP_I128_PPCF128, + + // COMPARISON + OEQ_F32, + OEQ_F64, + UNE_F32, + UNE_F64, + OGE_F32, + OGE_F64, + OLT_F32, + OLT_F64, + OLE_F32, + OLE_F64, + OGT_F32, + OGT_F64, + UO_F32, + UO_F64, + O_F32, + O_F64, + + // EXCEPTION HANDLING + UNWIND_RESUME, + + UNKNOWN_LIBCALL + }; + + /// getFPEXT - Return the FPEXT_*_* value for the given types, or + /// UNKNOWN_LIBCALL if there is none. + Libcall getFPEXT(MVT OpVT, MVT RetVT); + + /// getFPROUND - Return the FPROUND_*_* value for the given types, or + /// UNKNOWN_LIBCALL if there is none. + Libcall getFPROUND(MVT OpVT, MVT RetVT); + + /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or + /// UNKNOWN_LIBCALL if there is none. + Libcall getFPTOSINT(MVT OpVT, MVT RetVT); + + /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or + /// UNKNOWN_LIBCALL if there is none. + Libcall getFPTOUINT(MVT OpVT, MVT RetVT); + + /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or + /// UNKNOWN_LIBCALL if there is none. + Libcall getSINTTOFP(MVT OpVT, MVT RetVT); + + /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or + /// UNKNOWN_LIBCALL if there is none. + Libcall getUINTTOFP(MVT OpVT, MVT RetVT); +} +} + +#endif diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h new file mode 100644 index 0000000..237d491 --- /dev/null +++ b/include/llvm/CodeGen/ScheduleDAG.h @@ -0,0 +1,666 @@ +//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ScheduleDAG class, which is used as the common +// base class for instruction schedulers. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SCHEDULEDAG_H +#define LLVM_CODEGEN_SCHEDULEDAG_H + +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/PointerIntPair.h" + +namespace llvm { + class SUnit; + class MachineConstantPool; + class MachineFunction; + class MachineModuleInfo; + class MachineRegisterInfo; + class MachineInstr; + class TargetRegisterInfo; + class ScheduleDAG; + class SDNode; + class TargetInstrInfo; + class TargetInstrDesc; + class TargetLowering; + class TargetMachine; + class TargetRegisterClass; + template<class Graph> class GraphWriter; + + /// SDep - Scheduling dependency. This represents one direction of an + /// edge in the scheduling DAG. + class SDep { + public: + /// Kind - These are the different kinds of scheduling dependencies. 
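+    /// For instance, a register anti-dependence between two units would be
+    /// recorded roughly as (SU, SuccSU and Reg are assumed to exist in the
+    /// caller):
+    ///
+    ///   SuccSU->addPred(SDep(SU, SDep::Anti, /*latency=*/0, Reg));
+    ///
+    /// while a plain ordering edge needs no register:
+    ///
+    ///   SuccSU->addPred(SDep(SU, SDep::Order, /*latency=*/1));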
+ enum Kind { + Data, ///< Regular data dependence (aka true-dependence). + Anti, ///< A register anti-dependedence (aka WAR). + Output, ///< A register output-dependence (aka WAW). + Order ///< Any other ordering dependency. + }; + + private: + /// Dep - A pointer to the depending/depended-on SUnit, and an enum + /// indicating the kind of the dependency. + PointerIntPair<SUnit *, 2, Kind> Dep; + + /// Contents - A union discriminated by the dependence kind. + union { + /// Reg - For Data, Anti, and Output dependencies, the associated + /// register. For Data dependencies that don't currently have a register + /// assigned, this is set to zero. + unsigned Reg; + + /// Order - Additional information about Order dependencies. + struct { + /// isNormalMemory - True if both sides of the dependence + /// access memory in non-volatile and fully modeled ways. + bool isNormalMemory : 1; + + /// isMustAlias - True if both sides of the dependence are known to + /// access the same memory. + bool isMustAlias : 1; + + /// isArtificial - True if this is an artificial dependency, meaning + /// it is not necessary for program correctness, and may be safely + /// deleted if necessary. + bool isArtificial : 1; + } Order; + } Contents; + + /// Latency - The time associated with this edge. Often this is just + /// the value of the Latency field of the predecessor, however advanced + /// models may provide additional information about specific edges. + unsigned Latency; + + public: + /// SDep - Construct a null SDep. This is only for use by container + /// classes which require default constructors. SUnits may not + /// have null SDep edges. + SDep() : Dep(0, Data) {} + + /// SDep - Construct an SDep with the specified values. + SDep(SUnit *S, Kind kind, unsigned latency = 1, unsigned Reg = 0, + bool isNormalMemory = false, bool isMustAlias = false, + bool isArtificial = false) + : Dep(S, kind), Contents(), Latency(latency) { + switch (kind) { + case Anti: + case Output: + assert(Reg != 0 && + "SDep::Anti and SDep::Output must use a non-zero Reg!"); + // fall through + case Data: + assert(!isMustAlias && "isMustAlias only applies with SDep::Order!"); + assert(!isArtificial && "isArtificial only applies with SDep::Order!"); + Contents.Reg = Reg; + break; + case Order: + assert(Reg == 0 && "Reg given for non-register dependence!"); + Contents.Order.isNormalMemory = isNormalMemory; + Contents.Order.isMustAlias = isMustAlias; + Contents.Order.isArtificial = isArtificial; + break; + } + } + + bool operator==(const SDep &Other) const { + if (Dep != Other.Dep || Latency != Other.Latency) return false; + switch (Dep.getInt()) { + case Data: + case Anti: + case Output: + return Contents.Reg == Other.Contents.Reg; + case Order: + return Contents.Order.isNormalMemory == + Other.Contents.Order.isNormalMemory && + Contents.Order.isMustAlias == Other.Contents.Order.isMustAlias && + Contents.Order.isArtificial == Other.Contents.Order.isArtificial; + } + assert(0 && "Invalid dependency kind!"); + return false; + } + + bool operator!=(const SDep &Other) const { + return !operator==(Other); + } + + /// getLatency - Return the latency value for this edge, which roughly + /// means the minimum number of cycles that must elapse between the + /// predecessor and the successor, given that they have this edge + /// between them. + unsigned getLatency() const { + return Latency; + } + + //// getSUnit - Return the SUnit to which this edge points. 
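+    /// Walking a node's predecessor edges usually pairs this accessor with a
+    /// kind query; illustrative only (SU and WorkList are assumed names):
+    ///
+    ///   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+    ///        I != E; ++I)
+    ///     if (!I->isCtrl())                    // data edge
+    ///       WorkList.push_back(I->getSUnit());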
+ SUnit *getSUnit() const { + return Dep.getPointer(); + } + + //// setSUnit - Assign the SUnit to which this edge points. + void setSUnit(SUnit *SU) { + Dep.setPointer(SU); + } + + /// getKind - Return an enum value representing the kind of the dependence. + Kind getKind() const { + return Dep.getInt(); + } + + /// isCtrl - Shorthand for getKind() != SDep::Data. + bool isCtrl() const { + return getKind() != Data; + } + + /// isNormalMemory - Test if this is an Order dependence between two + /// memory accesses where both sides of the dependence access memory + /// in non-volatile and fully modeled ways. + bool isNormalMemory() const { + return getKind() == Order && Contents.Order.isNormalMemory; + } + + /// isMustAlias - Test if this is an Order dependence that is marked + /// as "must alias", meaning that the SUnits at either end of the edge + /// have a memory dependence on a known memory location. + bool isMustAlias() const { + return getKind() == Order && Contents.Order.isMustAlias; + } + + /// isArtificial - Test if this is an Order dependence that is marked + /// as "artificial", meaning it isn't necessary for correctness. + bool isArtificial() const { + return getKind() == Order && Contents.Order.isArtificial; + } + + /// isAssignedRegDep - Test if this is a Data dependence that is + /// associated with a register. + bool isAssignedRegDep() const { + return getKind() == Data && Contents.Reg != 0; + } + + /// getReg - Return the register associated with this edge. This is + /// only valid on Data, Anti, and Output edges. On Data edges, this + /// value may be zero, meaning there is no associated register. + unsigned getReg() const { + assert((getKind() == Data || getKind() == Anti || getKind() == Output) && + "getReg called on non-register dependence edge!"); + return Contents.Reg; + } + + /// setReg - Assign the associated register for this edge. This is + /// only valid on Data, Anti, and Output edges. On Anti and Output + /// edges, this value must not be zero. On Data edges, the value may + /// be zero, which would mean that no specific register is associated + /// with this edge. + void setReg(unsigned Reg) { + assert((getKind() == Data || getKind() == Anti || getKind() == Output) && + "setReg called on non-register dependence edge!"); + assert((getKind() != Anti || Reg != 0) && + "SDep::Anti edge cannot use the zero register!"); + assert((getKind() != Output || Reg != 0) && + "SDep::Output edge cannot use the zero register!"); + Contents.Reg = Reg; + } + }; + + /// SUnit - Scheduling unit. This is a node in the scheduling DAG. + class SUnit { + private: + SDNode *Node; // Representative node. + MachineInstr *Instr; // Alternatively, a MachineInstr. + public: + SUnit *OrigNode; // If not this, the node from which + // this node was cloned. + + // Preds/Succs - The SUnits before/after us in the graph. The boolean value + // is true if the edge is a token chain edge, false if it is a value edge. + SmallVector<SDep, 4> Preds; // All sunit predecessors. + SmallVector<SDep, 4> Succs; // All sunit successors. + + typedef SmallVector<SDep, 4>::iterator pred_iterator; + typedef SmallVector<SDep, 4>::iterator succ_iterator; + typedef SmallVector<SDep, 4>::const_iterator const_pred_iterator; + typedef SmallVector<SDep, 4>::const_iterator const_succ_iterator; + + unsigned NodeNum; // Entry # of node in the node vector. + unsigned NodeQueueId; // Queue id of node. + unsigned short Latency; // Node latency. + short NumPreds; // # of SDep::Data preds. 
+ short NumSuccs; // # of SDep::Data sucss. + short NumPredsLeft; // # of preds not scheduled. + short NumSuccsLeft; // # of succs not scheduled. + bool isTwoAddress : 1; // Is a two-address instruction. + bool isCommutable : 1; // Is a commutable instruction. + bool hasPhysRegDefs : 1; // Has physreg defs that are being used. + bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not. + bool isPending : 1; // True once pending. + bool isAvailable : 1; // True once available. + bool isScheduled : 1; // True once scheduled. + bool isScheduleHigh : 1; // True if preferable to schedule high. + bool isCloned : 1; // True if this node has been cloned. + private: + bool isDepthCurrent : 1; // True if Depth is current. + bool isHeightCurrent : 1; // True if Height is current. + unsigned Depth; // Node depth. + unsigned Height; // Node height. + public: + const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null. + const TargetRegisterClass *CopySrcRC; + + /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent + /// an SDNode and any nodes flagged to it. + SUnit(SDNode *node, unsigned nodenum) + : Node(node), Instr(0), OrigNode(0), NodeNum(nodenum), NodeQueueId(0), + Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0), + isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false), + hasPhysRegClobbers(false), + isPending(false), isAvailable(false), isScheduled(false), + isScheduleHigh(false), isCloned(false), + isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0), + CopyDstRC(NULL), CopySrcRC(NULL) {} + + /// SUnit - Construct an SUnit for post-regalloc scheduling to represent + /// a MachineInstr. + SUnit(MachineInstr *instr, unsigned nodenum) + : Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum), NodeQueueId(0), + Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0), + isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false), + hasPhysRegClobbers(false), + isPending(false), isAvailable(false), isScheduled(false), + isScheduleHigh(false), isCloned(false), + isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0), + CopyDstRC(NULL), CopySrcRC(NULL) {} + + /// SUnit - Construct a placeholder SUnit. + SUnit() + : Node(0), Instr(0), OrigNode(0), NodeNum(~0u), NodeQueueId(0), + Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0), + isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false), + hasPhysRegClobbers(false), + isPending(false), isAvailable(false), isScheduled(false), + isScheduleHigh(false), isCloned(false), + isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0), + CopyDstRC(NULL), CopySrcRC(NULL) {} + + /// setNode - Assign the representative SDNode for this SUnit. + /// This may be used during pre-regalloc scheduling. + void setNode(SDNode *N) { + assert(!Instr && "Setting SDNode of SUnit with MachineInstr!"); + Node = N; + } + + /// getNode - Return the representative SDNode for this SUnit. + /// This may be used during pre-regalloc scheduling. + SDNode *getNode() const { + assert(!Instr && "Reading SDNode of SUnit with MachineInstr!"); + return Node; + } + + /// setInstr - Assign the instruction for the SUnit. + /// This may be used during post-regalloc scheduling. + void setInstr(MachineInstr *MI) { + assert(!Node && "Setting MachineInstr of SUnit with SDNode!"); + Instr = MI; + } + + /// getInstr - Return the representative MachineInstr for this SUnit. + /// This may be used during post-regalloc scheduling. 
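+    /// During post-regalloc scheduling a client might, for example, special-
+    /// case calls; a sketch only (SU comes from the scheduler, and the
+    /// getDesc().isCall() query is assumed to be available on MachineInstr):
+    ///
+    ///   if (const MachineInstr *MI = SU->getInstr())
+    ///     if (MI->getDesc().isCall()) {
+    ///       // e.g. force a scheduling boundary here
+    ///     }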
+ MachineInstr *getInstr() const { + assert(!Node && "Reading MachineInstr of SUnit with SDNode!"); + return Instr; + } + + /// addPred - This adds the specified edge as a pred of the current node if + /// not already. It also adds the current node as a successor of the + /// specified node. + void addPred(const SDep &D); + + /// removePred - This removes the specified edge as a pred of the current + /// node if it exists. It also removes the current node as a successor of + /// the specified node. + void removePred(const SDep &D); + + /// getDepth - Return the depth of this node, which is the length of the + /// maximum path up to any node with has no predecessors. + unsigned getDepth() const { + if (!isDepthCurrent) const_cast<SUnit *>(this)->ComputeDepth(); + return Depth; + } + + /// getHeight - Return the height of this node, which is the length of the + /// maximum path down to any node with has no successors. + unsigned getHeight() const { + if (!isHeightCurrent) const_cast<SUnit *>(this)->ComputeHeight(); + return Height; + } + + /// setDepthToAtLeast - If NewDepth is greater than this node's depth + /// value, set it to be the new depth value. This also recursively + /// marks successor nodes dirty. + void setDepthToAtLeast(unsigned NewDepth); + + /// setDepthToAtLeast - If NewDepth is greater than this node's depth + /// value, set it to be the new height value. This also recursively + /// marks predecessor nodes dirty. + void setHeightToAtLeast(unsigned NewHeight); + + /// setDepthDirty - Set a flag in this node to indicate that its + /// stored Depth value will require recomputation the next time + /// getDepth() is called. + void setDepthDirty(); + + /// setHeightDirty - Set a flag in this node to indicate that its + /// stored Height value will require recomputation the next time + /// getHeight() is called. + void setHeightDirty(); + + /// isPred - Test if node N is a predecessor of this node. + bool isPred(SUnit *N) { + for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i) + if (Preds[i].getSUnit() == N) + return true; + return false; + } + + /// isSucc - Test if node N is a successor of this node. + bool isSucc(SUnit *N) { + for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i) + if (Succs[i].getSUnit() == N) + return true; + return false; + } + + void dump(const ScheduleDAG *G) const; + void dumpAll(const ScheduleDAG *G) const; + void print(raw_ostream &O, const ScheduleDAG *G) const; + + private: + void ComputeDepth(); + void ComputeHeight(); + }; + + //===--------------------------------------------------------------------===// + /// SchedulingPriorityQueue - This interface is used to plug different + /// priorities computation algorithms into the list scheduler. It implements + /// the interface of a standard priority queue, where nodes are inserted in + /// arbitrary order and returned in priority order. The computation of the + /// priority and the representation of the queue are totally up to the + /// implementation to decide. 
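+  /// A deliberately naive FIFO queue, shown only to illustrate the shape of
+  /// an implementation (not part of LLVM; real queues order by priority):
+  ///
+  ///   struct FIFOSchedulingQueue : public SchedulingPriorityQueue {
+  ///     std::vector<SUnit*> Queue;
+  ///     bool empty() const { return Queue.empty(); }
+  ///     unsigned size() const { return (unsigned)Queue.size(); }
+  ///     void push(SUnit *U) { Queue.push_back(U); }
+  ///     SUnit *pop() {                       // callers check empty() first
+  ///       SUnit *Front = Queue.front();
+  ///       Queue.erase(Queue.begin());
+  ///       return Front;
+  ///     }
+  ///     // initNodes, addNode, updateNode, push_all, remove and releaseState
+  ///     // would be implemented similarly.
+  ///   };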
+ /// + class SchedulingPriorityQueue { + public: + virtual ~SchedulingPriorityQueue() {} + + virtual void initNodes(std::vector<SUnit> &SUnits) = 0; + virtual void addNode(const SUnit *SU) = 0; + virtual void updateNode(const SUnit *SU) = 0; + virtual void releaseState() = 0; + + virtual unsigned size() const = 0; + virtual bool empty() const = 0; + virtual void push(SUnit *U) = 0; + + virtual void push_all(const std::vector<SUnit *> &Nodes) = 0; + virtual SUnit *pop() = 0; + + virtual void remove(SUnit *SU) = 0; + + /// ScheduledNode - As each node is scheduled, this method is invoked. This + /// allows the priority function to adjust the priority of related + /// unscheduled nodes, for example. + /// + virtual void ScheduledNode(SUnit *) {} + + virtual void UnscheduledNode(SUnit *) {} + }; + + class ScheduleDAG { + public: + MachineBasicBlock *BB; // The block in which to insert instructions. + MachineBasicBlock::iterator InsertPos;// The position to insert instructions. + const TargetMachine &TM; // Target processor + const TargetInstrInfo *TII; // Target instruction information + const TargetRegisterInfo *TRI; // Target processor register info + const TargetLowering *TLI; // Target lowering info + MachineFunction &MF; // Machine function + MachineRegisterInfo &MRI; // Virtual/real register map + MachineConstantPool *ConstPool; // Target constant pool + std::vector<SUnit*> Sequence; // The schedule. Null SUnit*'s + // represent noop instructions. + std::vector<SUnit> SUnits; // The scheduling units. + SUnit EntrySU; // Special node for the region entry. + SUnit ExitSU; // Special node for the region exit. + + explicit ScheduleDAG(MachineFunction &mf); + + virtual ~ScheduleDAG(); + + /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered + /// using 'dot'. + /// + void viewGraph(); + + /// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock + /// according to the order specified in Sequence. + /// + virtual MachineBasicBlock *EmitSchedule() = 0; + + void dumpSchedule() const; + + virtual void dumpNode(const SUnit *SU) const = 0; + + /// getGraphNodeLabel - Return a label for an SUnit node in a visualization + /// of the ScheduleDAG. + virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0; + + /// addCustomGraphFeatures - Add custom features for a visualization of + /// the ScheduleDAG. + virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {} + +#ifndef NDEBUG + /// VerifySchedule - Verify that all SUnits were scheduled and that + /// their state is consistent. + void VerifySchedule(bool isBottomUp); +#endif + + protected: + /// Run - perform scheduling. + /// + void Run(MachineBasicBlock *bb, MachineBasicBlock::iterator insertPos); + + /// BuildSchedGraph - Build SUnits and set up their Preds and Succs + /// to form the scheduling dependency graph. + /// + virtual void BuildSchedGraph() = 0; + + /// ComputeLatency - Compute node latency. + /// + virtual void ComputeLatency(SUnit *SU) = 0; + + /// Schedule - Order nodes according to selected style, filling + /// in the Sequence member. + /// + virtual void Schedule() = 0; + + /// ForceUnitLatencies - Return true if all scheduling edges should be given a + /// latency value of one. The default is to return false; schedulers may + /// override this as needed. + virtual bool ForceUnitLatencies() const { return false; } + + /// EmitNoop - Emit a noop instruction. 
+ /// + void EmitNoop(); + + void AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO); + + void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap); + + private: + /// EmitLiveInCopy - Emit a copy for a live in physical register. If the + /// physical register has only a single copy use, then coalesced the copy + /// if possible. + void EmitLiveInCopy(MachineBasicBlock *MBB, + MachineBasicBlock::iterator &InsertPos, + unsigned VirtReg, unsigned PhysReg, + const TargetRegisterClass *RC, + DenseMap<MachineInstr*, unsigned> &CopyRegMap); + + /// EmitLiveInCopies - If this is the first basic block in the function, + /// and if it has live ins that need to be copied into vregs, emit the + /// copies into the top of the block. + void EmitLiveInCopies(MachineBasicBlock *MBB); + }; + + class SUnitIterator : public forward_iterator<SUnit, ptrdiff_t> { + SUnit *Node; + unsigned Operand; + + SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {} + public: + bool operator==(const SUnitIterator& x) const { + return Operand == x.Operand; + } + bool operator!=(const SUnitIterator& x) const { return !operator==(x); } + + const SUnitIterator &operator=(const SUnitIterator &I) { + assert(I.Node == Node && "Cannot assign iterators to two different nodes!"); + Operand = I.Operand; + return *this; + } + + pointer operator*() const { + return Node->Preds[Operand].getSUnit(); + } + pointer operator->() const { return operator*(); } + + SUnitIterator& operator++() { // Preincrement + ++Operand; + return *this; + } + SUnitIterator operator++(int) { // Postincrement + SUnitIterator tmp = *this; ++*this; return tmp; + } + + static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); } + static SUnitIterator end (SUnit *N) { + return SUnitIterator(N, (unsigned)N->Preds.size()); + } + + unsigned getOperand() const { return Operand; } + const SUnit *getNode() const { return Node; } + /// isCtrlDep - Test if this is not an SDep::Data dependence. + bool isCtrlDep() const { + return getSDep().isCtrl(); + } + bool isArtificialDep() const { + return getSDep().isArtificial(); + } + const SDep &getSDep() const { + return Node->Preds[Operand]; + } + }; + + template <> struct GraphTraits<SUnit*> { + typedef SUnit NodeType; + typedef SUnitIterator ChildIteratorType; + static inline NodeType *getEntryNode(SUnit *N) { return N; } + static inline ChildIteratorType child_begin(NodeType *N) { + return SUnitIterator::begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return SUnitIterator::end(N); + } + }; + + template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> { + typedef std::vector<SUnit>::iterator nodes_iterator; + static nodes_iterator nodes_begin(ScheduleDAG *G) { + return G->SUnits.begin(); + } + static nodes_iterator nodes_end(ScheduleDAG *G) { + return G->SUnits.end(); + } + }; + + /// ScheduleDAGTopologicalSort is a class that computes a topological + /// ordering for SUnits and provides methods for dynamically updating + /// the ordering as new edges are added. + /// + /// This allows a very fast implementation of IsReachable, for example. + /// + class ScheduleDAGTopologicalSort { + /// SUnits - A reference to the ScheduleDAG's SUnits. + std::vector<SUnit> &SUnits; + + /// Index2Node - Maps topological index to the node number. + std::vector<int> Index2Node; + /// Node2Index - Maps the node number to its topological index. + std::vector<int> Node2Index; + /// Visited - a set of nodes visited during a DFS traversal. 
+ BitVector Visited; + + /// DFS - make a DFS traversal and mark all nodes affected by the + /// edge insertion. These nodes will later get new topological indexes + /// by means of the Shift method. + void DFS(const SUnit *SU, int UpperBound, bool& HasLoop); + + /// Shift - reassign topological indexes for the nodes in the DAG + /// to preserve the topological ordering. + void Shift(BitVector& Visited, int LowerBound, int UpperBound); + + /// Allocate - assign the topological index to the node n. + void Allocate(int n, int index); + + public: + explicit ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits); + + /// InitDAGTopologicalSorting - create the initial topological + /// ordering from the DAG to be scheduled. + void InitDAGTopologicalSorting(); + + /// IsReachable - Checks if SU is reachable from TargetSU. + bool IsReachable(const SUnit *SU, const SUnit *TargetSU); + + /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU + /// will create a cycle. + bool WillCreateCycle(SUnit *SU, SUnit *TargetSU); + + /// AddPred - Updates the topological ordering to accomodate an edge + /// to be added from SUnit X to SUnit Y. + void AddPred(SUnit *Y, SUnit *X); + + /// RemovePred - Updates the topological ordering to accomodate an + /// an edge to be removed from the specified node N from the predecessors + /// of the current node M. + void RemovePred(SUnit *M, SUnit *N); + + typedef std::vector<int>::iterator iterator; + typedef std::vector<int>::const_iterator const_iterator; + iterator begin() { return Index2Node.begin(); } + const_iterator begin() const { return Index2Node.begin(); } + iterator end() { return Index2Node.end(); } + const_iterator end() const { return Index2Node.end(); } + + typedef std::vector<int>::reverse_iterator reverse_iterator; + typedef std::vector<int>::const_reverse_iterator const_reverse_iterator; + reverse_iterator rbegin() { return Index2Node.rbegin(); } + const_reverse_iterator rbegin() const { return Index2Node.rbegin(); } + reverse_iterator rend() { return Index2Node.rend(); } + const_reverse_iterator rend() const { return Index2Node.rend(); } + }; +} + +#endif diff --git a/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/include/llvm/CodeGen/ScheduleHazardRecognizer.h new file mode 100644 index 0000000..369882d --- /dev/null +++ b/include/llvm/CodeGen/ScheduleHazardRecognizer.h @@ -0,0 +1,66 @@ +//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ScheduleHazardRecognizer class, which implements +// hazard-avoidance heuristics for scheduling. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H +#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H + +namespace llvm { + +class SUnit; + +/// HazardRecognizer - This determines whether or not an instruction can be +/// issued this cycle, and whether or not a noop needs to be inserted to handle +/// the hazard. +class ScheduleHazardRecognizer { +public: + virtual ~ScheduleHazardRecognizer(); + + enum HazardType { + NoHazard, // This instruction can be emitted at this cycle. + Hazard, // This instruction can't be emitted at this cycle. + NoopHazard // This instruction can't be emitted, and needs noops. 
+ }; + + /// getHazardType - Return the hazard type of emitting this node. There are + /// three possible results. Either: + /// * NoHazard: it is legal to issue this instruction on this cycle. + /// * Hazard: issuing this instruction would stall the machine. If some + /// other instruction is available, issue it first. + /// * NoopHazard: issuing this instruction would break the program. If + /// some other instruction can be issued, do so, otherwise issue a noop. + virtual HazardType getHazardType(SUnit *) { + return NoHazard; + } + + /// EmitInstruction - This callback is invoked when an instruction is + /// emitted, to advance the hazard state. + virtual void EmitInstruction(SUnit *) {} + + /// AdvanceCycle - This callback is invoked when no instructions can be + /// issued on this cycle without a hazard. This should increment the + /// internal state of the hazard recognizer so that previously "Hazard" + /// instructions will now not be hazards. + virtual void AdvanceCycle() {} + + /// EmitNoop - This callback is invoked when a noop was added to the + /// instruction stream. + virtual void EmitNoop() { + // Default implementation: count it as a cycle. + AdvanceCycle(); + } +}; + +} + +#endif diff --git a/include/llvm/CodeGen/SchedulerRegistry.h b/include/llvm/CodeGen/SchedulerRegistry.h new file mode 100644 index 0000000..1cf64a0 --- /dev/null +++ b/include/llvm/CodeGen/SchedulerRegistry.h @@ -0,0 +1,93 @@ +//===-- llvm/CodeGen/SchedulerRegistry.h ------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation for instruction scheduler function +// pass registry (RegisterScheduler). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGENSCHEDULERREGISTRY_H +#define LLVM_CODEGENSCHEDULERREGISTRY_H + +#include "llvm/CodeGen/MachinePassRegistry.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// RegisterScheduler class - Track the registration of instruction schedulers. +/// +//===----------------------------------------------------------------------===// + +class SelectionDAGISel; +class ScheduleDAGSDNodes; +class SelectionDAG; +class MachineBasicBlock; + +class RegisterScheduler : public MachinePassRegistryNode { +public: + typedef ScheduleDAGSDNodes *(*FunctionPassCtor)(SelectionDAGISel*, + CodeGenOpt::Level); + + static MachinePassRegistry Registry; + + RegisterScheduler(const char *N, const char *D, FunctionPassCtor C) + : MachinePassRegistryNode(N, D, (MachinePassCtor)C) + { Registry.Add(this); } + ~RegisterScheduler() { Registry.Remove(this); } + + + // Accessors. + // + RegisterScheduler *getNext() const { + return (RegisterScheduler *)MachinePassRegistryNode::getNext(); + } + static RegisterScheduler *getList() { + return (RegisterScheduler *)Registry.getList(); + } + static FunctionPassCtor getDefault() { + return (FunctionPassCtor)Registry.getDefault(); + } + static void setDefault(FunctionPassCtor C) { + Registry.setDefault((MachinePassCtor)C); + } + static void setListener(MachinePassRegistryListener *L) { + Registry.setListener(L); + } +}; + +/// createBURRListDAGScheduler - This creates a bottom up register usage +/// reduction list scheduler. 
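+/// Schedulers such as this one are normally exposed through the registry
+/// above; a registration sketch (the variable name is illustrative) is:
+///   static RegisterScheduler
+///     burrListDAGScheduler("list-burr",
+///                          "Bottom-up register reduction list scheduling",
+///                          createBURRListDAGScheduler);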
+ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); + +/// createTDRRListDAGScheduler - This creates a top down register usage +/// reduction list scheduler. +ScheduleDAGSDNodes *createTDRRListDAGScheduler(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); + +/// createTDListDAGScheduler - This creates a top-down list scheduler with +/// a hazard recognizer. +ScheduleDAGSDNodes *createTDListDAGScheduler(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); + +/// createFastDAGScheduler - This creates a "fast" scheduler. +/// +ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); + +/// createDefaultScheduler - This creates an instruction scheduler appropriate +/// for the target. +ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); + +} // end namespace llvm + +#endif diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h new file mode 100644 index 0000000..ec2d1d7 --- /dev/null +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -0,0 +1,880 @@ +//===-- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the SelectionDAG class, and transitively defines the +// SDNode class and subclasses. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SELECTIONDAG_H +#define LLVM_CODEGEN_SELECTIONDAG_H + +#include "llvm/ADT/ilist.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/Target/TargetMachine.h" +#include <cassert> +#include <vector> +#include <map> +#include <string> + +namespace llvm { + +class AliasAnalysis; +class TargetLowering; +class MachineModuleInfo; +class DwarfWriter; +class MachineFunction; +class MachineConstantPoolValue; +class FunctionLoweringInfo; + +template<> struct ilist_traits<SDNode> : public ilist_default_traits<SDNode> { +private: + mutable ilist_node<SDNode> Sentinel; +public: + SDNode *createSentinel() const { + return static_cast<SDNode*>(&Sentinel); + } + static void destroySentinel(SDNode *) {} + + SDNode *provideInitialHead() const { return createSentinel(); } + SDNode *ensureHead(SDNode*) const { return createSentinel(); } + static void noteHead(SDNode*, SDNode*) {} + + static void deleteNode(SDNode *) { + assert(0 && "ilist_traits<SDNode> shouldn't see a deleteNode call!"); + } +private: + static void createNode(const SDNode &); +}; + +enum CombineLevel { + Unrestricted, // Combine may create illegal operations and illegal types. + NoIllegalTypes, // Combine may create illegal operations but no illegal types. + NoIllegalOperations // Combine may only create legal operations and types. +}; + +/// SelectionDAG class - This is used to represent a portion of an LLVM function +/// in a low-level Data Dependence DAG representation suitable for instruction +/// selection. This DAG is constructed as the first step of instruction +/// selection in order to allow implementation of machine specific optimizations +/// and code simplifications. 
+///
+/// The representation used by the SelectionDAG is a target-independent
+/// representation, which has some similarities to the GCC RTL representation,
+/// but is significantly simpler and more powerful, and is a graph form instead
+/// of a linear form.
+///
+class SelectionDAG {
+  TargetLowering &TLI;
+  MachineFunction *MF;
+  FunctionLoweringInfo &FLI;
+  MachineModuleInfo *MMI;
+  DwarfWriter *DW;
+
+  /// EntryNode - The starting token.
+  SDNode EntryNode;
+
+  /// Root - The root of the entire DAG.
+  SDValue Root;
+
+  /// AllNodes - A linked list of nodes in the current DAG.
+  ilist<SDNode> AllNodes;
+
+  /// NodeAllocatorType - The AllocatorType for allocating SDNodes. We use
+  /// pool allocation with recycling.
+  typedef RecyclingAllocator<BumpPtrAllocator, SDNode, sizeof(LargestSDNode),
+                             AlignOf<MostAlignedSDNode>::Alignment>
+    NodeAllocatorType;
+
+  /// NodeAllocator - Pool allocation for nodes.
+  NodeAllocatorType NodeAllocator;
+
+  /// CSEMap - This structure is used to memoize nodes, automatically performing
+  /// CSE with existing nodes when a duplicate is requested.
+  FoldingSet<SDNode> CSEMap;
+
+  /// OperandAllocator - Pool allocation for machine-opcode SDNode operands.
+  BumpPtrAllocator OperandAllocator;
+
+  /// Allocator - Pool allocation for misc. objects that are created once per
+  /// SelectionDAG.
+  BumpPtrAllocator Allocator;
+
+  /// VerifyNode - Sanity check the given node.  Aborts if it is invalid.
+  void VerifyNode(SDNode *N);
+
+  /// setSubgraphColorHelper - Implementation of setSubgraphColor.
+  /// Return whether we had to truncate the search.
+  ///
+  bool setSubgraphColorHelper(SDNode *N, const char *Color,
+                              DenseSet<SDNode *> &visited,
+                              int level, bool &printed);
+
+public:
+  SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli);
+  ~SelectionDAG();
+
+  /// init - Prepare this SelectionDAG to process code in the given
+  /// MachineFunction.
+  ///
+  void init(MachineFunction &mf, MachineModuleInfo *mmi, DwarfWriter *dw);
+
+  /// clear - Clear state and free memory necessary to make this
+  /// SelectionDAG ready to process a new block.
+  ///
+  void clear();
+
+  MachineFunction &getMachineFunction() const { return *MF; }
+  const TargetMachine &getTarget() const;
+  TargetLowering &getTargetLoweringInfo() const { return TLI; }
+  FunctionLoweringInfo &getFunctionLoweringInfo() const { return FLI; }
+  MachineModuleInfo *getMachineModuleInfo() const { return MMI; }
+  DwarfWriter *getDwarfWriter() const { return DW; }
+
+  /// viewGraph - Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
+  ///
+  void viewGraph(const std::string &Title);
+  void viewGraph();
+
+#ifndef NDEBUG
+  std::map<const SDNode *, std::string> NodeGraphAttrs;
+#endif
+
+  /// clearGraphAttrs - Clear all previously defined node graph attributes.
+  /// Intended to be used from a debugging tool (eg. gdb).
+  void clearGraphAttrs();
+
+  /// setGraphAttrs - Set graph attributes for a node. (eg. "color=red".)
+  ///
+  void setGraphAttrs(const SDNode *N, const char *Attrs);
+
+  /// getGraphAttrs - Get graph attributes for a node. (eg. "color=red".)
+  /// Used from getNodeAttributes.
+  const std::string getGraphAttrs(const SDNode *N) const;
+
+  /// setGraphColor - Convenience for setting node color attribute.
+  ///
+  void setGraphColor(const SDNode *N, const char *Color);
+
+  /// setSubgraphColor - Convenience for setting subgraph color attribute.
+ /// + void setSubgraphColor(SDNode *N, const char *Color); + + typedef ilist<SDNode>::const_iterator allnodes_const_iterator; + allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); } + allnodes_const_iterator allnodes_end() const { return AllNodes.end(); } + typedef ilist<SDNode>::iterator allnodes_iterator; + allnodes_iterator allnodes_begin() { return AllNodes.begin(); } + allnodes_iterator allnodes_end() { return AllNodes.end(); } + ilist<SDNode>::size_type allnodes_size() const { + return AllNodes.size(); + } + + /// getRoot - Return the root tag of the SelectionDAG. + /// + const SDValue &getRoot() const { return Root; } + + /// getEntryNode - Return the token chain corresponding to the entry of the + /// function. + SDValue getEntryNode() const { + return SDValue(const_cast<SDNode *>(&EntryNode), 0); + } + + /// setRoot - Set the current root tag of the SelectionDAG. + /// + const SDValue &setRoot(SDValue N) { + assert((!N.getNode() || N.getValueType() == MVT::Other) && + "DAG root value is not a chain!"); + return Root = N; + } + + /// Combine - This iterates over the nodes in the SelectionDAG, folding + /// certain types of nodes together, or eliminating superfluous nodes. The + /// Level argument controls whether Combine is allowed to produce nodes and + /// types that are illegal on the target. + void Combine(CombineLevel Level, AliasAnalysis &AA, + CodeGenOpt::Level OptLevel); + + /// LegalizeTypes - This transforms the SelectionDAG into a SelectionDAG that + /// only uses types natively supported by the target. Returns "true" if it + /// made any changes. + /// + /// Note that this is an involved process that may invalidate pointers into + /// the graph. + bool LegalizeTypes(); + + /// Legalize - This transforms the SelectionDAG into a SelectionDAG that is + /// compatible with the target instruction selector, as indicated by the + /// TargetLowering object. + /// + /// Note that this is an involved process that may invalidate pointers into + /// the graph. + void Legalize(bool TypesNeedLegalizing, CodeGenOpt::Level OptLevel); + + /// LegalizeVectors - This transforms the SelectionDAG into a SelectionDAG + /// that only uses vector math operations supported by the target. This is + /// necessary as a separate step from Legalize because unrolling a vector + /// operation can introduce illegal types, which requires running + /// LegalizeTypes again. + /// + /// This returns true if it made any changes; in that case, LegalizeTypes + /// is called again before Legalize. + /// + /// Note that this is an involved process that may invalidate pointers into + /// the graph. + bool LegalizeVectors(); + + /// RemoveDeadNodes - This method deletes all unreachable nodes in the + /// SelectionDAG. + void RemoveDeadNodes(); + + /// DeleteNode - Remove the specified node from the system. This node must + /// have no referrers. + void DeleteNode(SDNode *N); + + /// getVTList - Return an SDVTList that represents the list of values + /// specified. + SDVTList getVTList(MVT VT); + SDVTList getVTList(MVT VT1, MVT VT2); + SDVTList getVTList(MVT VT1, MVT VT2, MVT VT3); + SDVTList getVTList(MVT VT1, MVT VT2, MVT VT3, MVT VT4); + SDVTList getVTList(const MVT *VTs, unsigned NumVTs); + + //===--------------------------------------------------------------------===// + // Node creation methods. 
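+  //
+  // A small usage sketch (DAG, dl, and X are illustrative names from target
+  // lowering code):
+  //   SDValue Four = DAG.getConstant(4, MVT::i32);
+  //   SDValue Sum  = DAG.getNode(ISD::ADD, dl, MVT::i32, X, Four);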
+ // + SDValue getConstant(uint64_t Val, MVT VT, bool isTarget = false); + SDValue getConstant(const APInt &Val, MVT VT, bool isTarget = false); + SDValue getConstant(const ConstantInt &Val, MVT VT, bool isTarget = false); + SDValue getIntPtrConstant(uint64_t Val, bool isTarget = false); + SDValue getTargetConstant(uint64_t Val, MVT VT) { + return getConstant(Val, VT, true); + } + SDValue getTargetConstant(const APInt &Val, MVT VT) { + return getConstant(Val, VT, true); + } + SDValue getTargetConstant(const ConstantInt &Val, MVT VT) { + return getConstant(Val, VT, true); + } + SDValue getConstantFP(double Val, MVT VT, bool isTarget = false); + SDValue getConstantFP(const APFloat& Val, MVT VT, bool isTarget = false); + SDValue getConstantFP(const ConstantFP &CF, MVT VT, bool isTarget = false); + SDValue getTargetConstantFP(double Val, MVT VT) { + return getConstantFP(Val, VT, true); + } + SDValue getTargetConstantFP(const APFloat& Val, MVT VT) { + return getConstantFP(Val, VT, true); + } + SDValue getTargetConstantFP(const ConstantFP &Val, MVT VT) { + return getConstantFP(Val, VT, true); + } + SDValue getGlobalAddress(const GlobalValue *GV, MVT VT, + int64_t offset = 0, bool isTargetGA = false); + SDValue getTargetGlobalAddress(const GlobalValue *GV, MVT VT, + int64_t offset = 0) { + return getGlobalAddress(GV, VT, offset, true); + } + SDValue getFrameIndex(int FI, MVT VT, bool isTarget = false); + SDValue getTargetFrameIndex(int FI, MVT VT) { + return getFrameIndex(FI, VT, true); + } + SDValue getJumpTable(int JTI, MVT VT, bool isTarget = false); + SDValue getTargetJumpTable(int JTI, MVT VT) { + return getJumpTable(JTI, VT, true); + } + SDValue getConstantPool(Constant *C, MVT VT, + unsigned Align = 0, int Offs = 0, bool isT=false); + SDValue getTargetConstantPool(Constant *C, MVT VT, + unsigned Align = 0, int Offset = 0) { + return getConstantPool(C, VT, Align, Offset, true); + } + SDValue getConstantPool(MachineConstantPoolValue *C, MVT VT, + unsigned Align = 0, int Offs = 0, bool isT=false); + SDValue getTargetConstantPool(MachineConstantPoolValue *C, + MVT VT, unsigned Align = 0, + int Offset = 0) { + return getConstantPool(C, VT, Align, Offset, true); + } + // When generating a branch to a BB, we don't in general know enough + // to provide debug info for the BB at that time, so keep this one around. + SDValue getBasicBlock(MachineBasicBlock *MBB); + SDValue getBasicBlock(MachineBasicBlock *MBB, DebugLoc dl); + SDValue getExternalSymbol(const char *Sym, MVT VT); + SDValue getExternalSymbol(const char *Sym, DebugLoc dl, MVT VT); + SDValue getTargetExternalSymbol(const char *Sym, MVT VT); + SDValue getTargetExternalSymbol(const char *Sym, DebugLoc dl, MVT VT); + SDValue getArgFlags(ISD::ArgFlagsTy Flags); + SDValue getValueType(MVT); + SDValue getRegister(unsigned Reg, MVT VT); + SDValue getDbgStopPoint(DebugLoc DL, SDValue Root, + unsigned Line, unsigned Col, Value *CU); + SDValue getLabel(unsigned Opcode, DebugLoc dl, SDValue Root, + unsigned LabelID); + + SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) { + return getNode(ISD::CopyToReg, dl, MVT::Other, Chain, + getRegister(Reg, N.getValueType()), N); + } + + // This version of the getCopyToReg method takes an extra operand, which + // indicates that there is potentially an incoming flag value (if Flag is not + // null) and that there should be a flag result. 
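+  // For example, to glue the copy to a following node, keep both results
+  // (a sketch; Chain, Val, and Reg are illustrative):
+  //   SDValue Copy = DAG.getCopyToReg(Chain, dl, Reg, Val, SDValue());
+  //   Chain = Copy.getValue(0);           // the output chain
+  //   SDValue Flag = Copy.getValue(1);    // the output flag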
+ SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N, + SDValue Flag) { + SDVTList VTs = getVTList(MVT::Other, MVT::Flag); + SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Flag }; + return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3); + } + + // Similar to last getCopyToReg() except parameter Reg is a SDValue + SDValue getCopyToReg(SDValue Chain, DebugLoc dl, SDValue Reg, SDValue N, + SDValue Flag) { + SDVTList VTs = getVTList(MVT::Other, MVT::Flag); + SDValue Ops[] = { Chain, Reg, N, Flag }; + return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3); + } + + SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, MVT VT) { + SDVTList VTs = getVTList(VT, MVT::Other); + SDValue Ops[] = { Chain, getRegister(Reg, VT) }; + return getNode(ISD::CopyFromReg, dl, VTs, Ops, 2); + } + + // This version of the getCopyFromReg method takes an extra operand, which + // indicates that there is potentially an incoming flag value (if Flag is not + // null) and that there should be a flag result. + SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, MVT VT, + SDValue Flag) { + SDVTList VTs = getVTList(VT, MVT::Other, MVT::Flag); + SDValue Ops[] = { Chain, getRegister(Reg, VT), Flag }; + return getNode(ISD::CopyFromReg, dl, VTs, Ops, Flag.getNode() ? 3 : 2); + } + + SDValue getCondCode(ISD::CondCode Cond); + + /// Returns the ConvertRndSat Note: Avoid using this node because it may + /// disappear in the future and most targets don't support it. + SDValue getConvertRndSat(MVT VT, DebugLoc dl, SDValue Val, SDValue DTy, + SDValue STy, + SDValue Rnd, SDValue Sat, ISD::CvtCode Code); + + /// getVectorShuffle - Return an ISD::VECTOR_SHUFFLE node. The number of + /// elements in VT, which must be a vector type, must match the number of + /// mask elements NumElts. A integer mask element equal to -1 is treated as + /// undefined. + SDValue getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1, SDValue N2, + const int *MaskElts); + + /// getZeroExtendInReg - Return the expression required to zero extend the Op + /// value assuming it was the smaller SrcTy value. + SDValue getZeroExtendInReg(SDValue Op, DebugLoc DL, MVT SrcTy); + + /// getNOT - Create a bitwise NOT operation as (XOR Val, -1). + SDValue getNOT(DebugLoc DL, SDValue Val, MVT VT); + + /// getCALLSEQ_START - Return a new CALLSEQ_START node, which always must have + /// a flag result (to ensure it's not CSE'd). CALLSEQ_START does not have a + /// useful DebugLoc. + SDValue getCALLSEQ_START(SDValue Chain, SDValue Op) { + SDVTList VTs = getVTList(MVT::Other, MVT::Flag); + SDValue Ops[] = { Chain, Op }; + return getNode(ISD::CALLSEQ_START, DebugLoc::getUnknownLoc(), + VTs, Ops, 2); + } + + /// getCALLSEQ_END - Return a new CALLSEQ_END node, which always must have a + /// flag result (to ensure it's not CSE'd). CALLSEQ_END does not have + /// a useful DebugLoc. + SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, + SDValue InFlag) { + SDVTList NodeTys = getVTList(MVT::Other, MVT::Flag); + SmallVector<SDValue, 4> Ops; + Ops.push_back(Chain); + Ops.push_back(Op1); + Ops.push_back(Op2); + Ops.push_back(InFlag); + return getNode(ISD::CALLSEQ_END, DebugLoc::getUnknownLoc(), NodeTys, + &Ops[0], + (unsigned)Ops.size() - (InFlag.getNode() == 0 ? 1 : 0)); + } + + /// getUNDEF - Return an UNDEF node. UNDEF does not have a useful DebugLoc. 
+ SDValue getUNDEF(MVT VT) { + return getNode(ISD::UNDEF, DebugLoc::getUnknownLoc(), VT); + } + + /// getGLOBAL_OFFSET_TABLE - Return a GLOBAL_OFFSET_TABLE node. This does + /// not have a useful DebugLoc. + SDValue getGLOBAL_OFFSET_TABLE(MVT VT) { + return getNode(ISD::GLOBAL_OFFSET_TABLE, DebugLoc::getUnknownLoc(), VT); + } + + /// getNode - Gets or creates the specified node. + /// + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, SDValue N); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, SDValue N1, SDValue N2); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, + SDValue N1, SDValue N2, SDValue N3); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, + SDValue N1, SDValue N2, SDValue N3, SDValue N4); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, + SDValue N1, SDValue N2, SDValue N3, SDValue N4, + SDValue N5); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, + const SDUse *Ops, unsigned NumOps); + SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, + const SDValue *Ops, unsigned NumOps); + SDValue getNode(unsigned Opcode, DebugLoc DL, + const std::vector<MVT> &ResultTys, + const SDValue *Ops, unsigned NumOps); + SDValue getNode(unsigned Opcode, DebugLoc DL, const MVT *VTs, unsigned NumVTs, + const SDValue *Ops, unsigned NumOps); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, + const SDValue *Ops, unsigned NumOps); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, SDValue N); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, + SDValue N1, SDValue N2); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, + SDValue N1, SDValue N2, SDValue N3); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, + SDValue N1, SDValue N2, SDValue N3, SDValue N4); + SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, + SDValue N1, SDValue N2, SDValue N3, SDValue N4, + SDValue N5); + + SDValue getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, bool AlwaysInline, + const Value *DstSV, uint64_t DstSVOff, + const Value *SrcSV, uint64_t SrcSVOff); + + SDValue getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, + const Value *DstSV, uint64_t DstOSVff, + const Value *SrcSV, uint64_t SrcSVOff); + + SDValue getMemset(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, + const Value *DstSV, uint64_t DstSVOff); + + /// getSetCC - Helper function to make it easier to build SetCC's if you just + /// have an ISD::CondCode instead of an SDValue. + /// + SDValue getSetCC(DebugLoc DL, MVT VT, SDValue LHS, SDValue RHS, + ISD::CondCode Cond) { + return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond)); + } + + /// getVSetCC - Helper function to make it easier to build VSetCC's nodes + /// if you just have an ISD::CondCode instead of an SDValue. + /// + SDValue getVSetCC(DebugLoc DL, MVT VT, SDValue LHS, SDValue RHS, + ISD::CondCode Cond) { + return getNode(ISD::VSETCC, DL, VT, LHS, RHS, getCondCode(Cond)); + } + + /// getSelectCC - Helper function to make it easier to build SelectCC's if you + /// just have an ISD::CondCode instead of an SDValue. 
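+  /// For example (a sketch; A and B are illustrative SDValues), a signed
+  /// max(A, B) can be built as:
+  ///   SDValue Max = DAG.getSelectCC(dl, A, B, A, B, ISD::SETGT);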
+ /// + SDValue getSelectCC(DebugLoc DL, SDValue LHS, SDValue RHS, + SDValue True, SDValue False, ISD::CondCode Cond) { + return getNode(ISD::SELECT_CC, DL, True.getValueType(), + LHS, RHS, True, False, getCondCode(Cond)); + } + + /// getVAArg - VAArg produces a result and token chain, and takes a pointer + /// and a source value as input. + SDValue getVAArg(MVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr, + SDValue SV); + + /// getAtomic - Gets a node for an atomic op, produces result and chain and + /// takes 3 operands + SDValue getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT, SDValue Chain, + SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal, + unsigned Alignment=0); + + /// getAtomic - Gets a node for an atomic op, produces result and chain and + /// takes 2 operands. + SDValue getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT, SDValue Chain, + SDValue Ptr, SDValue Val, const Value* PtrVal, + unsigned Alignment = 0); + + /// getMemIntrinsicNode - Creates a MemIntrinsicNode that may produce a + /// result and takes a list of operands. + SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, + const MVT *VTs, unsigned NumVTs, + const SDValue *Ops, unsigned NumOps, + MVT MemVT, const Value *srcValue, int SVOff, + unsigned Align = 0, bool Vol = false, + bool ReadMem = true, bool WriteMem = true); + + SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList, + const SDValue *Ops, unsigned NumOps, + MVT MemVT, const Value *srcValue, int SVOff, + unsigned Align = 0, bool Vol = false, + bool ReadMem = true, bool WriteMem = true); + + /// getMergeValues - Create a MERGE_VALUES node from the given operands. + SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, DebugLoc dl); + + /// getCall - Create a CALL node from the given information. + /// + SDValue getCall(unsigned CallingConv, DebugLoc dl, bool IsVarArgs, + bool IsTailCall, bool isInreg, SDVTList VTs, + const SDValue *Operands, unsigned NumOperands); + + /// getLoad - Loads are not normal binary operators: their result type is not + /// determined by their operands, and they produce a value AND a token chain. + /// + SDValue getLoad(MVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr, + const Value *SV, int SVOffset, bool isVolatile=false, + unsigned Alignment=0); + SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, MVT VT, + SDValue Chain, SDValue Ptr, const Value *SV, + int SVOffset, MVT EVT, bool isVolatile=false, + unsigned Alignment=0); + SDValue getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base, + SDValue Offset, ISD::MemIndexedMode AM); + SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType, + MVT VT, SDValue Chain, + SDValue Ptr, SDValue Offset, + const Value *SV, int SVOffset, MVT EVT, + bool isVolatile=false, unsigned Alignment=0); + + /// getStore - Helper function to build ISD::STORE nodes. + /// + SDValue getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr, + const Value *SV, int SVOffset, bool isVolatile=false, + unsigned Alignment=0); + SDValue getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr, + const Value *SV, int SVOffset, MVT TVT, + bool isVolatile=false, unsigned Alignment=0); + SDValue getIndexedStore(SDValue OrigStoe, DebugLoc dl, SDValue Base, + SDValue Offset, ISD::MemIndexedMode AM); + + /// getSrcValue - Construct a node to track a Value* through the backend. + SDValue getSrcValue(const Value *v); + + /// getMemOperand - Construct a node to track a memory reference + /// through the backend. 
+ SDValue getMemOperand(const MachineMemOperand &MO); + + /// getShiftAmountOperand - Return the specified value casted to + /// the target's desired shift amount type. + SDValue getShiftAmountOperand(SDValue Op); + + /// UpdateNodeOperands - *Mutate* the specified node in-place to have the + /// specified operands. If the resultant node already exists in the DAG, + /// this does not modify the specified node, instead it returns the node that + /// already exists. If the resultant node does not exist in the DAG, the + /// input node is returned. As a degenerate case, if you specify the same + /// input operands as the node already has, the input node is returned. + SDValue UpdateNodeOperands(SDValue N, SDValue Op); + SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2); + SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, + SDValue Op3); + SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, + SDValue Op3, SDValue Op4); + SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, + SDValue Op3, SDValue Op4, SDValue Op5); + SDValue UpdateNodeOperands(SDValue N, + const SDValue *Ops, unsigned NumOps); + + /// SelectNodeTo - These are used for target selectors to *mutate* the + /// specified node to have the specified return type, Target opcode, and + /// operands. Note that target opcodes are stored as + /// ~TargetOpcode in the node opcode field. The resultant node is returned. + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, SDValue Op1); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, + SDValue Op1, SDValue Op2); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, + SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, + const SDValue *Ops, unsigned NumOps); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, MVT VT2); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, const SDValue *Ops, unsigned NumOps); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, MVT VT3, const SDValue *Ops, unsigned NumOps); + SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1, + MVT VT2, MVT VT3, MVT VT4, const SDValue *Ops, + unsigned NumOps); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, SDValue Op1); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, SDValue Op1, SDValue Op2); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, + MVT VT2, MVT VT3, SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs, + const SDValue *Ops, unsigned NumOps); + + /// MorphNodeTo - These *mutate* the specified node to have the specified + /// return type, opcode, and operands. 
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT, SDValue Op1); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT, + SDValue Op1, SDValue Op2); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT, + SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT, + const SDValue *Ops, unsigned NumOps); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, + MVT VT2, const SDValue *Ops, unsigned NumOps); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, + MVT VT2, MVT VT3, const SDValue *Ops, unsigned NumOps); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, + MVT VT2, SDValue Op1); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, + MVT VT2, SDValue Op1, SDValue Op2); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, + MVT VT2, SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, + const SDValue *Ops, unsigned NumOps); + + /// getTargetNode - These are used for target selectors to create a new node + /// with specified return type(s), target opcode, and operands. + /// + /// Note that getTargetNode returns the resultant node. If there is already a + /// node of the specified opcode and operands, it returns that node instead of + /// the current one. + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT, SDValue Op1); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT, SDValue Op1, + SDValue Op2); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT, + SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT, + const SDValue *Ops, unsigned NumOps); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, + SDValue Op1); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, + MVT VT2, SDValue Op1, SDValue Op2); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, + MVT VT2, SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, + const SDValue *Ops, unsigned NumOps); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3, + SDValue Op1, SDValue Op2); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3, + SDValue Op1, SDValue Op2, SDValue Op3); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3, + const SDValue *Ops, unsigned NumOps); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3, + MVT VT4, const SDValue *Ops, unsigned NumOps); + SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, + const std::vector<MVT> &ResultTys, const SDValue *Ops, + unsigned NumOps); + + /// getNodeIfExists - Get the specified node if it's already available, or + /// else return NULL. + SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, + const SDValue *Ops, unsigned NumOps); + + /// DAGUpdateListener - Clients of various APIs that cause global effects on + /// the DAG can optionally implement this interface. This allows the clients + /// to handle the various sorts of updates that happen. + class DAGUpdateListener { + public: + virtual ~DAGUpdateListener(); + + /// NodeDeleted - The node N that was deleted and, if E is not null, an + /// equivalent node E that replaced it. 
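+    /// A minimal listener sketch (illustrative; not provided by this header)
+    /// that simply counts deletions:
+    ///   struct CountingListener : public SelectionDAG::DAGUpdateListener {
+    ///     unsigned NumDeleted;
+    ///     CountingListener() : NumDeleted(0) {}
+    ///     virtual void NodeDeleted(SDNode*, SDNode*) { ++NumDeleted; }
+    ///     virtual void NodeUpdated(SDNode*) {}
+    ///   };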
+    virtual void NodeDeleted(SDNode *N, SDNode *E) = 0;
+
+    /// NodeUpdated - The node N that was updated.
+    virtual void NodeUpdated(SDNode *N) = 0;
+  };
+
+  /// RemoveDeadNode - Remove the specified node from the system. If any of its
+  /// operands then become dead, remove them as well. Inform UpdateListener
+  /// for each node deleted.
+  void RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener = 0);
+
+  /// RemoveDeadNodes - This method deletes the unreachable nodes in the
+  /// given list, and any nodes that become unreachable as a result.
+  void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
+                       DAGUpdateListener *UpdateListener = 0);
+
+  /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
+  /// This can cause recursive merging of nodes in the DAG. Use the first
+  /// version if 'From' is known to have a single result, use the second
+  /// if you have two nodes with identical results (or if 'To' has a superset
+  /// of the results of 'From'), use the third otherwise.
+  ///
+  /// These methods all take an optional UpdateListener, which (if not null) is
+  /// informed about nodes that are deleted and modified due to recursive
+  /// changes in the DAG.
+  ///
+  /// These functions only replace all existing uses. It's possible that as
+  /// these replacements are being performed, CSE may cause the From node
+  /// to be given new uses. These new uses of From are left in place, and
+  /// not automatically transferred to To.
+  ///
+  void ReplaceAllUsesWith(SDValue From, SDValue Op,
+                          DAGUpdateListener *UpdateListener = 0);
+  void ReplaceAllUsesWith(SDNode *From, SDNode *To,
+                          DAGUpdateListener *UpdateListener = 0);
+  void ReplaceAllUsesWith(SDNode *From, const SDValue *To,
+                          DAGUpdateListener *UpdateListener = 0);
+
+  /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
+  /// uses of other values produced by From.Val alone.
+  void ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
+                                 DAGUpdateListener *UpdateListener = 0);
+
+  /// ReplaceAllUsesOfValuesWith - Like ReplaceAllUsesOfValueWith, but
+  /// for multiple values at once. This correctly handles the case where
+  /// there is an overlap between the From values and the To values.
+  void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
+                                  unsigned Num,
+                                  DAGUpdateListener *UpdateListener = 0);
+
+  /// AssignTopologicalOrder - Topological-sort the AllNodes list and
+  /// assign a unique node id for each node in the DAG based on their
+  /// topological order. Returns the number of nodes.
+  unsigned AssignTopologicalOrder();
+
+  /// RepositionNode - Move node N in the AllNodes list to be immediately
+  /// before the given iterator Position. This may be used to update the
+  /// topological ordering when the list of nodes is modified.
+  void RepositionNode(allnodes_iterator Position, SDNode *N) {
+    AllNodes.insert(Position, AllNodes.remove(N));
+  }
+
+  /// isCommutativeBinOp - Returns true if the opcode is a commutative binary
+  /// operation.
+  static bool isCommutativeBinOp(unsigned Opcode) {
+    // FIXME: This should get its info from the td file, so that we can include
+    // target info.
+ switch (Opcode) { + case ISD::ADD: + case ISD::MUL: + case ISD::MULHU: + case ISD::MULHS: + case ISD::SMUL_LOHI: + case ISD::UMUL_LOHI: + case ISD::FADD: + case ISD::FMUL: + case ISD::AND: + case ISD::OR: + case ISD::XOR: + case ISD::SADDO: + case ISD::UADDO: + case ISD::ADDC: + case ISD::ADDE: return true; + default: return false; + } + } + + void dump() const; + + /// CreateStackTemporary - Create a stack temporary, suitable for holding the + /// specified value type. If minAlign is specified, the slot size will have + /// at least that alignment. + SDValue CreateStackTemporary(MVT VT, unsigned minAlign = 1); + + /// CreateStackTemporary - Create a stack temporary suitable for holding + /// either of the specified value types. + SDValue CreateStackTemporary(MVT VT1, MVT VT2); + + /// FoldConstantArithmetic - + SDValue FoldConstantArithmetic(unsigned Opcode, + MVT VT, + ConstantSDNode *Cst1, + ConstantSDNode *Cst2); + + /// FoldSetCC - Constant fold a setcc to true or false. + SDValue FoldSetCC(MVT VT, SDValue N1, + SDValue N2, ISD::CondCode Cond, DebugLoc dl); + + /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We + /// use this predicate to simplify operations downstream. + bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const; + + /// MaskedValueIsZero - Return true if 'Op & Mask' is known to be zero. We + /// use this predicate to simplify operations downstream. Op and Mask are + /// known to be the same type. + bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0) + const; + + /// ComputeMaskedBits - Determine which of the bits specified in Mask are + /// known to be either zero or one and return them in the KnownZero/KnownOne + /// bitsets. This code only analyzes bits in Mask, in order to short-circuit + /// processing. Targets can implement the computeMaskedBitsForTargetNode + /// method in the TargetLowering class to allow target nodes to be understood. + void ComputeMaskedBits(SDValue Op, const APInt &Mask, APInt &KnownZero, + APInt &KnownOne, unsigned Depth = 0) const; + + /// ComputeNumSignBits - Return the number of times the sign bit of the + /// register is replicated into the other bits. We know that at least 1 bit + /// is always equal to the sign bit (itself), but other cases can give us + /// information. For example, immediately after an "SRA X, 2", we know that + /// the top 3 bits are all equal to each other, so we return 3. Targets can + /// implement the ComputeNumSignBitsForTarget method in the TargetLowering + /// class to allow target nodes to be understood. + unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const; + + /// isVerifiedDebugInfoDesc - Returns true if the specified SDValue has + /// been verified as a debug information descriptor. + bool isVerifiedDebugInfoDesc(SDValue Op) const; + + /// getShuffleScalarElt - Returns the scalar element that will make up the ith + /// element of the result of the vector shuffle. 
+ SDValue getShuffleScalarElt(const ShuffleVectorSDNode *N, unsigned Idx); + +private: + bool RemoveNodeFromCSEMaps(SDNode *N); + void AddModifiedNodeToCSEMaps(SDNode *N, DAGUpdateListener *UpdateListener); + SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos); + SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2, + void *&InsertPos); + SDNode *FindModifiedNodeSlot(SDNode *N, const SDValue *Ops, unsigned NumOps, + void *&InsertPos); + + void DeleteNodeNotInCSEMaps(SDNode *N); + void DeallocateNode(SDNode *N); + + unsigned getMVTAlignment(MVT MemoryVT) const; + + void allnodes_clear(); + + /// VTList - List of non-single value types. + std::vector<SDVTList> VTList; + + /// CondCodeNodes - Maps to auto-CSE operations. + std::vector<CondCodeSDNode*> CondCodeNodes; + + std::vector<SDNode*> ValueTypeNodes; + std::map<MVT, SDNode*, MVT::compareRawBits> ExtendedValueTypeNodes; + StringMap<SDNode*> ExternalSymbols; + StringMap<SDNode*> TargetExternalSymbols; +}; + +template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> { + typedef SelectionDAG::allnodes_iterator nodes_iterator; + static nodes_iterator nodes_begin(SelectionDAG *G) { + return G->allnodes_begin(); + } + static nodes_iterator nodes_end(SelectionDAG *G) { + return G->allnodes_end(); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h new file mode 100644 index 0000000..d2c0dc4 --- /dev/null +++ b/include/llvm/CodeGen/SelectionDAGISel.h @@ -0,0 +1,140 @@ +//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the SelectionDAGISel class, which is used as the common +// base class for SelectionDAG-based instruction selectors. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SELECTIONDAG_ISEL_H +#define LLVM_CODEGEN_SELECTIONDAG_ISEL_H + +#include "llvm/BasicBlock.h" +#include "llvm/Pass.h" +#include "llvm/Constant.h" +#include "llvm/CodeGen/SelectionDAG.h" + +namespace llvm { + class FastISel; + class SelectionDAGLowering; + class SDValue; + class MachineRegisterInfo; + class MachineBasicBlock; + class MachineFunction; + class MachineInstr; + class MachineModuleInfo; + class DwarfWriter; + class TargetLowering; + class TargetInstrInfo; + class FunctionLoweringInfo; + class ScheduleHazardRecognizer; + class GCFunctionInfo; + class ScheduleDAGSDNodes; + +/// SelectionDAGISel - This is the common base class used for SelectionDAG-based +/// pattern-matching instruction selectors. 
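+/// A target subclass sketch (illustrative names; concrete selectors are mostly
+/// generated by tblgen from the target's .td files):
+///   class FooDAGToDAGISel : public SelectionDAGISel {
+///   public:
+///     explicit FooDAGToDAGISel(FooTargetMachine &TM) : SelectionDAGISel(TM) {}
+///     virtual void InstructionSelect();  // walk CurDAG and select each node
+///   };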
+class SelectionDAGISel : public FunctionPass { +public: + const TargetMachine &TM; + TargetLowering &TLI; + FunctionLoweringInfo *FuncInfo; + MachineFunction *MF; + MachineRegisterInfo *RegInfo; + SelectionDAG *CurDAG; + SelectionDAGLowering *SDL; + MachineBasicBlock *BB; + AliasAnalysis *AA; + GCFunctionInfo *GFI; + CodeGenOpt::Level OptLevel; + static char ID; + + explicit SelectionDAGISel(TargetMachine &tm, + CodeGenOpt::Level OL = CodeGenOpt::Default); + virtual ~SelectionDAGISel(); + + TargetLowering &getTargetLowering() { return TLI; } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + virtual bool runOnFunction(Function &Fn); + + unsigned MakeReg(MVT VT); + + virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {} + virtual void InstructionSelect() = 0; + + void SelectRootInit() { + DAGSize = CurDAG->AssignTopologicalOrder(); + } + + /// SelectInlineAsmMemoryOperand - Select the specified address as a target + /// addressing mode, according to the specified constraint code. If this does + /// not match or is not implemented, return true. The resultant operands + /// (which will appear in the machine instruction) should be added to the + /// OutOps vector. + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, + char ConstraintCode, + std::vector<SDValue> &OutOps) { + return true; + } + + /// IsLegalAndProfitableToFold - Returns true if the specific operand node N of + /// U can be folded during instruction selection that starts at Root and + /// folding N is profitable. + virtual + bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const; + + /// CreateTargetHazardRecognizer - Return a newly allocated hazard recognizer + /// to use for this target when scheduling the DAG. + virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer(); + +protected: + /// DAGSize - Size of DAG being instruction selected. + /// + unsigned DAGSize; + + /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated + /// by tblgen. Others should not call it. + void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops); + + // Calls to these predicates are generated by tblgen. + bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS, + int64_t DesiredMaskS) const; + bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS, + int64_t DesiredMaskS) const; + +private: + void SelectAllBasicBlocks(Function &Fn, MachineFunction &MF, + MachineModuleInfo *MMI, + DwarfWriter *DW, + const TargetInstrInfo &TII); + void FinishBasicBlock(); + + void SelectBasicBlock(BasicBlock *LLVMBB, + BasicBlock::iterator Begin, + BasicBlock::iterator End); + void CodeGenAndEmitDAG(); + void LowerArguments(BasicBlock *BB); + + void ComputeLiveOutVRegInfo(); + + void HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB); + + bool HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB, FastISel *F); + + /// Create the scheduler. If a specific scheduler was specified + /// via the SchedulerRegistry, use it, otherwise select the + /// one preferred by the target. 
+ /// + ScheduleDAGSDNodes *CreateScheduler(); +}; + +} + +#endif /* LLVM_CODEGEN_SELECTIONDAG_ISEL_H */ diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h new file mode 100644 index 0000000..ad48510 --- /dev/null +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -0,0 +1,2568 @@ +//===-- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the SDNode class and derived classes, which are used to +// represent the nodes and operations present in a SelectionDAG. These nodes +// and operations are machine code level operations, with some similarities to +// the GCC RTL representation. +// +// Clients should include the SelectionDAG.h file instead of this file directly. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H +#define LLVM_CODEGEN_SELECTIONDAGNODES_H + +#include "llvm/Constants.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/ilist_node.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/RecyclingAllocator.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/CodeGen/DebugLoc.h" +#include <cassert> +#include <climits> + +namespace llvm { + +class SelectionDAG; +class GlobalValue; +class MachineBasicBlock; +class MachineConstantPoolValue; +class SDNode; +class Value; +template <typename T> struct DenseMapInfo; +template <typename T> struct simplify_type; +template <typename T> struct ilist_traits; + +/// SDVTList - This represents a list of ValueType's that has been intern'd by +/// a SelectionDAG. Instances of this simple value class are returned by +/// SelectionDAG::getVTList(...). +/// +struct SDVTList { + const MVT *VTs; + unsigned int NumVTs; +}; + +/// ISD namespace - This namespace contains an enum which represents all of the +/// SelectionDAG node types and value types. +/// +namespace ISD { + + //===--------------------------------------------------------------------===// + /// ISD::NodeType enum - This enum defines the target-independent operators + /// for a SelectionDAG. + /// + /// Targets may also define target-dependent operator codes for SDNodes. For + /// example, on x86, these are the enum values in the X86ISD namespace. + /// Targets should aim to use target-independent operators to model their + /// instruction sets as much as possible, and only use target-dependent + /// operators when they have special requirements. + /// + /// Finally, during and after selection proper, SNodes may use special + /// operator codes that correspond directly with MachineInstr opcodes. These + /// are used to represent selected instructions. See the isMachineOpcode() + /// and getMachineOpcode() member functions of SDNode. + /// + enum NodeType { + // DELETED_NODE - This is an illegal value that is used to catch + // errors. This opcode is not a legal opcode for any node. + DELETED_NODE, + + // EntryToken - This is the marker used to indicate the start of the region. 
+ EntryToken, + + // TokenFactor - This node takes multiple tokens as input and produces a + // single token result. This is used to represent the fact that the operand + // operators are independent of each other. + TokenFactor, + + // AssertSext, AssertZext - These nodes record if a register contains a + // value that has already been zero or sign extended from a narrower type. + // These nodes take two operands. The first is the node that has already + // been extended, and the second is a value type node indicating the width + // of the extension + AssertSext, AssertZext, + + // Various leaf nodes. + BasicBlock, VALUETYPE, ARG_FLAGS, CONDCODE, Register, + Constant, ConstantFP, + GlobalAddress, GlobalTLSAddress, FrameIndex, + JumpTable, ConstantPool, ExternalSymbol, + + // The address of the GOT + GLOBAL_OFFSET_TABLE, + + // FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and + // llvm.returnaddress on the DAG. These nodes take one operand, the index + // of the frame or return address to return. An index of zero corresponds + // to the current function's frame or return address, an index of one to the + // parent's frame or return address, and so on. + FRAMEADDR, RETURNADDR, + + // FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to + // first (possible) on-stack argument. This is needed for correct stack + // adjustment during unwind. + FRAME_TO_ARGS_OFFSET, + + // RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the + // address of the exception block on entry to an landing pad block. + EXCEPTIONADDR, + + // RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node represents + // the selection index of the exception thrown. + EHSELECTION, + + // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents + // 'eh_return' gcc dwarf builtin, which is used to return from + // exception. The general meaning is: adjust stack by OFFSET and pass + // execution to HANDLER. Many platform-related details also :) + EH_RETURN, + + // TargetConstant* - Like Constant*, but the DAG does not do any folding or + // simplification of the constant. + TargetConstant, + TargetConstantFP, + + // TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or + // anything else with this node, and this is valid in the target-specific + // dag, turning into a GlobalAddress operand. + TargetGlobalAddress, + TargetGlobalTLSAddress, + TargetFrameIndex, + TargetJumpTable, + TargetConstantPool, + TargetExternalSymbol, + + /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) + /// This node represents a target intrinsic function with no side effects. + /// The first operand is the ID number of the intrinsic from the + /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The + /// node has returns the result of the intrinsic. + INTRINSIC_WO_CHAIN, + + /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) + /// This node represents a target intrinsic function with side effects that + /// returns a result. The first operand is a chain pointer. The second is + /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The + /// operands to the intrinsic follow. The node has two results, the result + /// of the intrinsic and an output chain. + INTRINSIC_W_CHAIN, + + /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) + /// This node represents a target intrinsic function with side effects that + /// does not return a result. The first operand is a chain pointer. 
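/* Illustrative sketch, not part of the original header: reading the intrinsic
   ID back out of an intrinsic node.  As documented above, the ID is operand 0
   for INTRINSIC_WO_CHAIN but shifts to operand 1 once an input chain is
   present (INTRINSIC_W_CHAIN, INTRINSIC_VOID).  The helper name is
   hypothetical; it relies on the SDNode accessors declared later in this
   header. */
static unsigned getIntrinsicID(const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::INTRINSIC_WO_CHAIN:
    return (unsigned)N->getConstantOperandVal(0);  // no chain: ID is operand 0
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    return (unsigned)N->getConstantOperandVal(1);  // operand 0 is the chain
  default:
    return 0;                                      // not an intrinsic node
  }
}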
The + /// second is the ID number of the intrinsic from the llvm::Intrinsic + /// namespace. The operands to the intrinsic follow. + INTRINSIC_VOID, + + // CopyToReg - This node has three operands: a chain, a register number to + // set to this value, and a value. + CopyToReg, + + // CopyFromReg - This node indicates that the input value is a virtual or + // physical register that is defined outside of the scope of this + // SelectionDAG. The register is available from the RegisterSDNode object. + CopyFromReg, + + // UNDEF - An undefined node + UNDEF, + + /// FORMAL_ARGUMENTS(CHAIN, CC#, ISVARARG, FLAG0, ..., FLAGn) - This node + /// represents the formal arguments for a function. CC# is a Constant value + /// indicating the calling convention of the function, and ISVARARG is a + /// flag that indicates whether the function is varargs or not. This node + /// has one result value for each incoming argument, plus one for the output + /// chain. It must be custom legalized. See description of CALL node for + /// FLAG argument contents explanation. + /// + FORMAL_ARGUMENTS, + + /// RV1, RV2...RVn, CHAIN = CALL(CHAIN, CALLEE, + /// ARG0, FLAG0, ARG1, FLAG1, ... ARGn, FLAGn) + /// This node represents a fully general function call, before the legalizer + /// runs. This has one result value for each argument / flag pair, plus + /// a chain result. It must be custom legalized. Flag argument indicates + /// misc. argument attributes. Currently: + /// Bit 0 - signness + /// Bit 1 - 'inreg' attribute + /// Bit 2 - 'sret' attribute + /// Bit 4 - 'byval' attribute + /// Bit 5 - 'nest' attribute + /// Bit 6-9 - alignment of byval structures + /// Bit 10-26 - size of byval structures + /// Bits 31:27 - argument ABI alignment in the first argument piece and + /// alignment '1' in other argument pieces. + /// + /// CALL nodes use the CallSDNode subclass of SDNode, which + /// additionally carries information about the calling convention, + /// whether the call is varargs, and if it's marked as a tail call. + /// + CALL, + + // EXTRACT_ELEMENT - This is used to get the lower or upper (determined by + // a Constant, which is required to be operand #1) half of the integer or + // float value specified as operand #0. This is only for use before + // legalization, for values that will be broken into multiple registers. + EXTRACT_ELEMENT, + + // BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. Given + // two values of the same integer value type, this produces a value twice as + // big. Like EXTRACT_ELEMENT, this can only be used before legalization. + BUILD_PAIR, + + // MERGE_VALUES - This node takes multiple discrete operands and returns + // them all as its individual results. This nodes has exactly the same + // number of inputs and outputs, and is only valid before legalization. + // This node is useful for some pieces of the code generator that want to + // think about a single node with multiple results, not multiple nodes. + MERGE_VALUES, + + // Simple integer binary arithmetic operators. + ADD, SUB, MUL, SDIV, UDIV, SREM, UREM, + + // SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing + // a signed/unsigned value of type i[2*N], and return the full value as + // two results, each of type iN. + SMUL_LOHI, UMUL_LOHI, + + // SDIVREM/UDIVREM - Divide two integers and produce both a quotient and + // remainder result. + SDIVREM, UDIVREM, + + // CARRY_FALSE - This node is used when folding other nodes, + // like ADDC/SUBC, which indicate the carry result is always false. 
+ CARRY_FALSE, + + // Carry-setting nodes for multiple precision addition and subtraction. + // These nodes take two operands of the same value type, and produce two + // results. The first result is the normal add or sub result, the second + // result is the carry flag result. + ADDC, SUBC, + + // Carry-using nodes for multiple precision addition and subtraction. These + // nodes take three operands: The first two are the normal lhs and rhs to + // the add or sub, and the third is the input carry flag. These nodes + // produce two results; the normal result of the add or sub, and the output + // carry flag. These nodes both read and write a carry flag to allow them + // to them to be chained together for add and sub of arbitrarily large + // values. + ADDE, SUBE, + + // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition. + // These nodes take two operands: the normal LHS and RHS to the add. They + // produce two results: the normal result of the add, and a boolean that + // indicates if an overflow occured (*not* a flag, because it may be stored + // to memory, etc.). If the type of the boolean is not i1 then the high + // bits conform to getBooleanContents. + // These nodes are generated from the llvm.[su]add.with.overflow intrinsics. + SADDO, UADDO, + + // Same for subtraction + SSUBO, USUBO, + + // Same for multiplication + SMULO, UMULO, + + // Simple binary floating point operators. + FADD, FSUB, FMUL, FDIV, FREM, + + // FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This + // DAG node does not require that X and Y have the same type, just that they + // are both floating point. X and the result must have the same type. + // FCOPYSIGN(f32, f64) is allowed. + FCOPYSIGN, + + // INT = FGETSIGN(FP) - Return the sign bit of the specified floating point + // value as an integer 0/1 value. + FGETSIGN, + + /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the + /// specified, possibly variable, elements. The number of elements is + /// required to be a power of two. The types of the operands must all be + /// the same and must match the vector element type, except that integer + /// types are allowed to be larger than the element type, in which case + /// the operands are implicitly truncated. + BUILD_VECTOR, + + /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element + /// at IDX replaced with VAL. If the type of VAL is larger than the vector + /// element type then VAL is truncated before replacement. + INSERT_VECTOR_ELT, + + /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR + /// identified by the (potentially variable) element number IDX. + EXTRACT_VECTOR_ELT, + + /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of + /// vector type with the same length and element type, this produces a + /// concatenated vector result value, with length equal to the sum of the + /// lengths of the input vectors. + CONCAT_VECTORS, + + /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an + /// vector value) starting with the (potentially variable) element number + /// IDX, which must be a multiple of the result vector length. + EXTRACT_SUBVECTOR, + + /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as + /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int + /// values that indicate which value (or undef) each result element will + /// get. These constant ints are accessible through the + /// ShuffleVectorSDNode class. 
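/* Illustrative sketch, not part of the original header: the overflow-aware
   nodes above define two results on a single SDNode, so clients address them
   with two SDValues that differ only in their result number. */
static void splitOverflowResults(SDNode *N, SDValue &Sum, SDValue &Overflow) {
  assert((N->getOpcode() == ISD::SADDO || N->getOpcode() == ISD::UADDO ||
          N->getOpcode() == ISD::SSUBO || N->getOpcode() == ISD::USUBO) &&
         "expected an overflow-aware add or sub");
  Sum      = SDValue(N, 0);   // the ordinary arithmetic result
  Overflow = SDValue(N, 1);   // the overflow bit (a value, not a flag)
}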
This is quite similar to the Altivec + /// 'vperm' instruction, except that the indices must be constants and are + /// in terms of the element size of VEC1/VEC2, not in terms of bytes. + VECTOR_SHUFFLE, + + /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a + /// scalar value into element 0 of the resultant vector type. The top + /// elements 1 to N-1 of the N-element vector are undefined. The type + /// of the operand must match the vector element type, except when they + /// are integer types. In this case the operand is allowed to be wider + /// than the vector element type, and is implicitly truncated to it. + SCALAR_TO_VECTOR, + + // MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing + // an unsigned/signed value of type i[2*N], then return the top part. + MULHU, MULHS, + + // Bitwise operators - logical and, logical or, logical xor, shift left, + // shift right algebraic (shift in sign bits), shift right logical (shift in + // zeroes), rotate left, rotate right, and byteswap. + AND, OR, XOR, SHL, SRA, SRL, ROTL, ROTR, BSWAP, + + // Counting operators + CTTZ, CTLZ, CTPOP, + + // Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not + // i1 then the high bits must conform to getBooleanContents. + SELECT, + + // Select with condition operator - This selects between a true value and + // a false value (ops #2 and #3) based on the boolean result of comparing + // the lhs and rhs (ops #0 and #1) of a conditional expression with the + // condition code in op #4, a CondCodeSDNode. + SELECT_CC, + + // SetCC operator - This evaluates to a true value iff the condition is + // true. If the result value type is not i1 then the high bits conform + // to getBooleanContents. The operands to this are the left and right + // operands to compare (ops #0, and #1) and the condition code to compare + // them with (op #2) as a CondCodeSDNode. + SETCC, + + // Vector SetCC operator - This evaluates to a vector of integer elements + // with the high bit in each element set to true if the comparison is true + // and false if the comparison is false. All other bits in each element + // are undefined. The operands to this are the left and right operands + // to compare (ops #0, and #1) and the condition code to compare them with + // (op #2) as a CondCodeSDNode. + VSETCC, + + // SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded + // integer shift operations, just like ADD/SUB_PARTS. The operation + // ordering is: + // [Lo,Hi] = op [LoLHS,HiLHS], Amt + SHL_PARTS, SRA_PARTS, SRL_PARTS, + + // Conversion operators. These are all single input single output + // operations. For all of these, the result type must be strictly + // wider or narrower (depending on the operation) than the source + // type. + + // SIGN_EXTEND - Used for integer types, replicating the sign bit + // into new bits. + SIGN_EXTEND, + + // ZERO_EXTEND - Used for integer types, zeroing the new bits. + ZERO_EXTEND, + + // ANY_EXTEND - Used for integer types. The high bits are undefined. + ANY_EXTEND, + + // TRUNCATE - Completely drop the high bits. + TRUNCATE, + + // [SU]INT_TO_FP - These operators convert integers (whose interpreted sign + // depends on the first letter) to floating point. + SINT_TO_FP, + UINT_TO_FP, + + // SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to + // sign extend a small value in a large integer register (e.g. sign + // extending the low 8 bits of a 32-bit register to fill the top 24 bits + // with the 7th bit). 
The size of the smaller type is indicated by the 1th + // operand, a ValueType node. + SIGN_EXTEND_INREG, + + /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned + /// integer. + FP_TO_SINT, + FP_TO_UINT, + + /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type + /// down to the precision of the destination VT. TRUNC is a flag, which is + /// always an integer that is zero or one. If TRUNC is 0, this is a + /// normal rounding, if it is 1, this FP_ROUND is known to not change the + /// value of Y. + /// + /// The TRUNC = 1 case is used in cases where we know that the value will + /// not be modified by the node, because Y is not using any of the extra + /// precision of source type. This allows certain transformations like + /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for + /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed. + FP_ROUND, + + // FLT_ROUNDS_ - Returns current rounding mode: + // -1 Undefined + // 0 Round to 0 + // 1 Round to nearest + // 2 Round to +inf + // 3 Round to -inf + FLT_ROUNDS_, + + /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and + /// rounds it to a floating point value. It then promotes it and returns it + /// in a register of the same size. This operation effectively just + /// discards excess precision. The type to round down to is specified by + /// the VT operand, a VTSDNode. + FP_ROUND_INREG, + + /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type. + FP_EXTEND, + + // BIT_CONVERT - Theis operator converts between integer and FP values, as + // if one was stored to memory as integer and the other was loaded from the + // same address (or equivalently for vector format conversions, etc). The + // source and result are required to have the same bit size (e.g. + // f32 <-> i32). This can also be used for int-to-int or fp-to-fp + // conversions, but that is a noop, deleted by getNode(). + BIT_CONVERT, + + // CONVERT_RNDSAT - This operator is used to support various conversions + // between various types (float, signed, unsigned and vectors of those + // types) with rounding and saturation. NOTE: Avoid using this operator as + // most target don't support it and the operator might be removed in the + // future. It takes the following arguments: + // 0) value + // 1) dest type (type to convert to) + // 2) src type (type to convert from) + // 3) rounding imm + // 4) saturation imm + // 5) ISD::CvtCode indicating the type of conversion to do + CONVERT_RNDSAT, + + // FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, + // FLOG, FLOG2, FLOG10, FEXP, FEXP2, + // FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary floating + // point operations. These are inspired by libm. + FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, + FLOG, FLOG2, FLOG10, FEXP, FEXP2, + FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR, + + // LOAD and STORE have token chains as their first operand, then the same + // operands as an LLVM load/store instruction, then an offset node that + // is added / subtracted from the base pointer to form the address (for + // indexed memory ops). + LOAD, STORE, + + // DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned + // to a specified boundary. This node always has two return values: a new + // stack pointer value and a chain. The first operand is the token chain, + // the second is the number of bytes to allocate, and the third is the + // alignment boundary. 
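/* Illustrative sketch, not part of the original header: the guard a combine
   would need before folding FP_EXTEND(FP_ROUND(X,1)) back to X, using the
   FP_ROUND operand layout described above (operand 1 is the TRUNC flag). */
static bool isRemovableRoundExtendPair(SDValue Ext) {
  if (Ext.getOpcode() != ISD::FP_EXTEND)
    return false;
  SDValue Round = Ext.getOperand(0);
  if (Round.getOpcode() != ISD::FP_ROUND)
    return false;
  // TRUNC == 1 promises the rounding did not change the value, and the types
  // must match so X can stand in for the whole expression.
  return Round.getConstantOperandVal(1) == 1 &&
         Round.getOperand(0).getValueType() == Ext.getValueType();
}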
The size is guaranteed to be a multiple of the stack + // alignment, and the alignment is guaranteed to be bigger than the stack + // alignment (if required) or 0 to get standard stack alignment. + DYNAMIC_STACKALLOC, + + // Control flow instructions. These all have token chains. + + // BR - Unconditional branch. The first operand is the chain + // operand, the second is the MBB to branch to. + BR, + + // BRIND - Indirect branch. The first operand is the chain, the second + // is the value to branch to, which must be of the same type as the target's + // pointer type. + BRIND, + + // BR_JT - Jumptable branch. The first operand is the chain, the second + // is the jumptable index, the last one is the jumptable entry index. + BR_JT, + + // BRCOND - Conditional branch. The first operand is the chain, the + // second is the condition, the third is the block to branch to if the + // condition is true. If the type of the condition is not i1, then the + // high bits must conform to getBooleanContents. + BRCOND, + + // BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in + // that the condition is represented as condition code, and two nodes to + // compare, rather than as a combined SetCC node. The operands in order are + // chain, cc, lhs, rhs, block to branch to if condition is true. + BR_CC, + + // RET - Return from function. The first operand is the chain, + // and any subsequent operands are pairs of return value and return value + // attributes (see CALL for description of attributes) for the function. + // This operation can have variable number of operands. + RET, + + // INLINEASM - Represents an inline asm block. This node always has two + // return values: a chain and a flag result. The inputs are as follows: + // Operand #0 : Input chain. + // Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string. + // Operand #2n+2: A RegisterNode. + // Operand #2n+3: A TargetConstant, indicating if the reg is a use/def + // Operand #last: Optional, an incoming flag. + INLINEASM, + + // DBG_LABEL, EH_LABEL - Represents a label in mid basic block used to track + // locations needed for debug and exception handling tables. These nodes + // take a chain as input and return a chain. + DBG_LABEL, + EH_LABEL, + + // DECLARE - Represents a llvm.dbg.declare intrinsic. It's used to track + // local variable declarations for debugging information. First operand is + // a chain, while the next two operands are first two arguments (address + // and variable) of a llvm.dbg.declare instruction. + DECLARE, + + // STACKSAVE - STACKSAVE has one operand, an input chain. It produces a + // value, the same type as the pointer type for the system, and an output + // chain. + STACKSAVE, + + // STACKRESTORE has two operands, an input chain and a pointer to restore to + // it returns an output chain. + STACKRESTORE, + + // CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of + // a call sequence, and carry arbitrary information that target might want + // to know. The first operand is a chain, the rest are specified by the + // target and not touched by the DAG optimizers. + // CALLSEQ_START..CALLSEQ_END pairs may not be nested. + CALLSEQ_START, // Beginning of a call sequence + CALLSEQ_END, // End of a call sequence + + // VAARG - VAARG has three operands: an input chain, a pointer, and a + // SRCVALUE. It returns a pair of values: the vaarg value and a new chain. 
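/* Illustrative sketch, not part of the original header: picking apart a
   BRCOND using the operand layout documented above (chain, condition,
   destination block), e.g. to see whether the condition is a SETCC that a
   target could fold into a compare-and-branch. */
static bool branchConditionIsSetCC(const SDNode *N) {
  if (N->getOpcode() != ISD::BRCOND)
    return false;
  // Operand 0 is the input chain and operand 2 the target block; the
  // boolean condition sits in operand 1.
  return N->getOperand(1).getOpcode() == ISD::SETCC;
}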
+ VAARG, + + // VACOPY - VACOPY has five operands: an input chain, a destination pointer, + // a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the + // source. + VACOPY, + + // VAEND, VASTART - VAEND and VASTART have three operands: an input chain, a + // pointer, and a SRCVALUE. + VAEND, VASTART, + + // SRCVALUE - This is a node type that holds a Value* that is used to + // make reference to a value in the LLVM IR. + SRCVALUE, + + // MEMOPERAND - This is a node that contains a MachineMemOperand which + // records information about a memory reference. This is used to make + // AliasAnalysis queries from the backend. + MEMOPERAND, + + // PCMARKER - This corresponds to the pcmarker intrinsic. + PCMARKER, + + // READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic. + // The only operand is a chain and a value and a chain are produced. The + // value is the contents of the architecture specific cycle counter like + // register (or other high accuracy low latency clock source) + READCYCLECOUNTER, + + // HANDLENODE node - Used as a handle for various purposes. + HANDLENODE, + + // DBG_STOPPOINT - This node is used to represent a source location for + // debug info. It takes token chain as input, and carries a line number, + // column number, and a pointer to a CompileUnit object identifying + // the containing compilation unit. It produces a token chain as output. + DBG_STOPPOINT, + + // DEBUG_LOC - This node is used to represent source line information + // embedded in the code. It takes a token chain as input, then a line + // number, then a column then a file id (provided by MachineModuleInfo.) It + // produces a token chain as output. + DEBUG_LOC, + + // TRAMPOLINE - This corresponds to the init_trampoline intrinsic. + // It takes as input a token chain, the pointer to the trampoline, + // the pointer to the nested function, the pointer to pass for the + // 'nest' parameter, a SRCVALUE for the trampoline and another for + // the nested function (allowing targets to access the original + // Function*). It produces the result of the intrinsic and a token + // chain as output. + TRAMPOLINE, + + // TRAP - Trapping instruction + TRAP, + + // PREFETCH - This corresponds to a prefetch intrinsic. It takes chains are + // their first operand. The other operands are the address to prefetch, + // read / write specifier, and locality specifier. + PREFETCH, + + // OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load, + // store-store, device) + // This corresponds to the memory.barrier intrinsic. + // it takes an input chain, 4 operands to specify the type of barrier, an + // operand specifying if the barrier applies to device and uncached memory + // and produces an output chain. + MEMBARRIER, + + // Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) + // this corresponds to the atomic.lcs intrinsic. + // cmp is compared to *ptr, and if equal, swap is stored in *ptr. + // the return is always the original value in *ptr + ATOMIC_CMP_SWAP, + + // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) + // this corresponds to the atomic.swap intrinsic. + // amt is stored to *ptr atomically. + // the return is always the original value in *ptr + ATOMIC_SWAP, + + // Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) + // this corresponds to the atomic.load.[OpName] intrinsic. + // op(*ptr, amt) is stored to *ptr atomically. 
+ // the return is always the original value in *ptr + ATOMIC_LOAD_ADD, + ATOMIC_LOAD_SUB, + ATOMIC_LOAD_AND, + ATOMIC_LOAD_OR, + ATOMIC_LOAD_XOR, + ATOMIC_LOAD_NAND, + ATOMIC_LOAD_MIN, + ATOMIC_LOAD_MAX, + ATOMIC_LOAD_UMIN, + ATOMIC_LOAD_UMAX, + + // BUILTIN_OP_END - This must be the last enum value in this list. + BUILTIN_OP_END + }; + + /// Node predicates + + /// isBuildVectorAllOnes - Return true if the specified node is a + /// BUILD_VECTOR where all of the elements are ~0 or undef. + bool isBuildVectorAllOnes(const SDNode *N); + + /// isBuildVectorAllZeros - Return true if the specified node is a + /// BUILD_VECTOR where all of the elements are 0 or undef. + bool isBuildVectorAllZeros(const SDNode *N); + + /// isScalarToVector - Return true if the specified node is a + /// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low + /// element is not an undef. + bool isScalarToVector(const SDNode *N); + + /// isDebugLabel - Return true if the specified node represents a debug + /// label (i.e. ISD::DBG_LABEL or TargetInstrInfo::DBG_LABEL node). + bool isDebugLabel(const SDNode *N); + + //===--------------------------------------------------------------------===// + /// MemIndexedMode enum - This enum defines the load / store indexed + /// addressing modes. + /// + /// UNINDEXED "Normal" load / store. The effective address is already + /// computed and is available in the base pointer. The offset + /// operand is always undefined. In addition to producing a + /// chain, an unindexed load produces one value (result of the + /// load); an unindexed store does not produce a value. + /// + /// PRE_INC Similar to the unindexed mode where the effective address is + /// PRE_DEC the value of the base pointer add / subtract the offset. + /// It considers the computation as being folded into the load / + /// store operation (i.e. the load / store does the address + /// computation as well as performing the memory transaction). + /// The base operand is always undefined. In addition to + /// producing a chain, pre-indexed load produces two values + /// (result of the load and the result of the address + /// computation); a pre-indexed store produces one value (result + /// of the address computation). + /// + /// POST_INC The effective address is the value of the base pointer. The + /// POST_DEC value of the offset operand is then added to / subtracted + /// from the base after memory transaction. In addition to + /// producing a chain, post-indexed load produces two values + /// (the result of the load and the result of the base +/- offset + /// computation); a post-indexed store produces one value (the + /// the result of the base +/- offset computation). + /// + enum MemIndexedMode { + UNINDEXED = 0, + PRE_INC, + PRE_DEC, + POST_INC, + POST_DEC, + LAST_INDEXED_MODE + }; + + //===--------------------------------------------------------------------===// + /// LoadExtType enum - This enum defines the three variants of LOADEXT + /// (load with extension). + /// + /// SEXTLOAD loads the integer operand and sign extends it to a larger + /// integer result type. + /// ZEXTLOAD loads the integer operand and zero extends it to a larger + /// integer result type. + /// EXTLOAD is used for three things: floating point extending loads, + /// integer extending loads [the top bits are undefined], and vector + /// extending loads [load into low elt]. 
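/* Illustrative sketch, not part of the original header: target-specific
   operators are conventionally numbered starting at ISD::BUILTIN_OP_END, the
   sentinel above, which is what lets SDNode::isTargetOpcode() separate them
   from the generic operators.  "MyTargetISD" and its two opcodes are
   hypothetical. */
namespace MyTargetISD {
  enum NodeType {
    FIRST_NUMBER = ISD::BUILTIN_OP_END,  // stay clear of the generic range
    CALL_GLUED,                          // example target-private operators
    RET_FLAG
  };
}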
+ /// + enum LoadExtType { + NON_EXTLOAD = 0, + EXTLOAD, + SEXTLOAD, + ZEXTLOAD, + LAST_LOADEXT_TYPE + }; + + //===--------------------------------------------------------------------===// + /// ISD::CondCode enum - These are ordered carefully to make the bitfields + /// below work out, when considering SETFALSE (something that never exists + /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered + /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal + /// to. If the "N" column is 1, the result of the comparison is undefined if + /// the input is a NAN. + /// + /// All of these (except for the 'always folded ops') should be handled for + /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT, + /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used. + /// + /// Note that these are laid out in a specific order to allow bit-twiddling + /// to transform conditions. + enum CondCode { + // Opcode N U L G E Intuitive operation + SETFALSE, // 0 0 0 0 Always false (always folded) + SETOEQ, // 0 0 0 1 True if ordered and equal + SETOGT, // 0 0 1 0 True if ordered and greater than + SETOGE, // 0 0 1 1 True if ordered and greater than or equal + SETOLT, // 0 1 0 0 True if ordered and less than + SETOLE, // 0 1 0 1 True if ordered and less than or equal + SETONE, // 0 1 1 0 True if ordered and operands are unequal + SETO, // 0 1 1 1 True if ordered (no nans) + SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y) + SETUEQ, // 1 0 0 1 True if unordered or equal + SETUGT, // 1 0 1 0 True if unordered or greater than + SETUGE, // 1 0 1 1 True if unordered, greater than, or equal + SETULT, // 1 1 0 0 True if unordered or less than + SETULE, // 1 1 0 1 True if unordered, less than, or equal + SETUNE, // 1 1 1 0 True if unordered or not equal + SETTRUE, // 1 1 1 1 Always true (always folded) + // Don't care operations: undefined if the input is a nan. + SETFALSE2, // 1 X 0 0 0 Always false (always folded) + SETEQ, // 1 X 0 0 1 True if equal + SETGT, // 1 X 0 1 0 True if greater than + SETGE, // 1 X 0 1 1 True if greater than or equal + SETLT, // 1 X 1 0 0 True if less than + SETLE, // 1 X 1 0 1 True if less than or equal + SETNE, // 1 X 1 1 0 True if not equal + SETTRUE2, // 1 X 1 1 1 Always true (always folded) + + SETCC_INVALID // Marker value. + }; + + /// isSignedIntSetCC - Return true if this is a setcc instruction that + /// performs a signed comparison when used with integer operands. + inline bool isSignedIntSetCC(CondCode Code) { + return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE; + } + + /// isUnsignedIntSetCC - Return true if this is a setcc instruction that + /// performs an unsigned comparison when used with integer operands. + inline bool isUnsignedIntSetCC(CondCode Code) { + return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE; + } + + /// isTrueWhenEqual - Return true if the specified condition returns true if + /// the two operands to the condition are equal. Note that if one of the two + /// operands is a NaN, this value is meaningless. + inline bool isTrueWhenEqual(CondCode Cond) { + return ((int)Cond & 1) != 0; + } + + /// getUnorderedFlavor - This function returns 0 if the condition is always + /// false if an operand is a NaN, 1 if the condition is always true if the + /// operand is a NaN, and 2 if the condition is undefined if the operand is a + /// NaN. 
+ inline unsigned getUnorderedFlavor(CondCode Cond) { + return ((int)Cond >> 3) & 3; + } + + /// getSetCCInverse - Return the operation corresponding to !(X op Y), where + /// 'op' is a valid SetCC operation. + CondCode getSetCCInverse(CondCode Operation, bool isInteger); + + /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X) + /// when given the operation for (X op Y). + CondCode getSetCCSwappedOperands(CondCode Operation); + + /// getSetCCOrOperation - Return the result of a logical OR between different + /// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This + /// function returns SETCC_INVALID if it is not possible to represent the + /// resultant comparison. + CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger); + + /// getSetCCAndOperation - Return the result of a logical AND between + /// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This + /// function returns SETCC_INVALID if it is not possible to represent the + /// resultant comparison. + CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger); + + //===--------------------------------------------------------------------===// + /// CvtCode enum - This enum defines the various converts CONVERT_RNDSAT + /// supports. + enum CvtCode { + CVT_FF, // Float from Float + CVT_FS, // Float from Signed + CVT_FU, // Float from Unsigned + CVT_SF, // Signed from Float + CVT_UF, // Unsigned from Float + CVT_SS, // Signed from Signed + CVT_SU, // Signed from Unsigned + CVT_US, // Unsigned from Signed + CVT_UU, // Unsigned from Unsigned + CVT_INVALID // Marker - Invalid opcode + }; +} // end llvm::ISD namespace + + +//===----------------------------------------------------------------------===// +/// SDValue - Unlike LLVM values, Selection DAG nodes may return multiple +/// values as the result of a computation. Many nodes return multiple values, +/// from loads (which define a token and a return value) to ADDC (which returns +/// a result and a carry value), to calls (which may return an arbitrary number +/// of values). +/// +/// As such, each use of a SelectionDAG computation must indicate the node that +/// computes it as well as which return value to use from that node. This pair +/// of information is represented with the SDValue value type. +/// +class SDValue { + SDNode *Node; // The node defining the value we are using. + unsigned ResNo; // Which return value of the node we are using. +public: + SDValue() : Node(0), ResNo(0) {} + SDValue(SDNode *node, unsigned resno) : Node(node), ResNo(resno) {} + + /// get the index which selects a specific result in the SDNode + unsigned getResNo() const { return ResNo; } + + /// get the SDNode which holds the desired result + SDNode *getNode() const { return Node; } + + /// set the SDNode + void setNode(SDNode *N) { Node = N; } + + bool operator==(const SDValue &O) const { + return Node == O.Node && ResNo == O.ResNo; + } + bool operator!=(const SDValue &O) const { + return !operator==(O); + } + bool operator<(const SDValue &O) const { + return Node < O.Node || (Node == O.Node && ResNo < O.ResNo); + } + + SDValue getValue(unsigned R) const { + return SDValue(Node, R); + } + + // isOperandOf - Return true if this node is an operand of N. + bool isOperandOf(SDNode *N) const; + + /// getValueType - Return the ValueType of the referenced return value. + /// + inline MVT getValueType() const; + + /// getValueSizeInBits - Returns the size of the value in bits. 
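/* Illustrative sketch, not part of the original header: canonicalizing a
   comparison by swapping its operands with the CondCode helpers declared
   above.  getSetCCSwappedOperands maps the condition for (X op Y) to the one
   for (Y op X), so SETLT becomes SETGT, and the signed/unsigned class of the
   code is preserved. */
static ISD::CondCode swapCompareOperands(ISD::CondCode CC) {
  ISD::CondCode Swapped = ISD::getSetCCSwappedOperands(CC);
  assert(ISD::isSignedIntSetCC(CC) == ISD::isSignedIntSetCC(Swapped) &&
         "swapping operands must not change signedness");
  return Swapped;
}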
+ /// + unsigned getValueSizeInBits() const { + return getValueType().getSizeInBits(); + } + + // Forwarding methods - These forward to the corresponding methods in SDNode. + inline unsigned getOpcode() const; + inline unsigned getNumOperands() const; + inline const SDValue &getOperand(unsigned i) const; + inline uint64_t getConstantOperandVal(unsigned i) const; + inline bool isTargetOpcode() const; + inline bool isMachineOpcode() const; + inline unsigned getMachineOpcode() const; + inline const DebugLoc getDebugLoc() const; + + + /// reachesChainWithoutSideEffects - Return true if this operand (which must + /// be a chain) reaches the specified operand without crossing any + /// side-effecting instructions. In practice, this looks through token + /// factors and non-volatile loads. In order to remain efficient, this only + /// looks a couple of nodes in, it does not do an exhaustive search. + bool reachesChainWithoutSideEffects(SDValue Dest, + unsigned Depth = 2) const; + + /// use_empty - Return true if there are no nodes using value ResNo + /// of Node. + /// + inline bool use_empty() const; + + /// hasOneUse - Return true if there is exactly one node using value + /// ResNo of Node. + /// + inline bool hasOneUse() const; +}; + + +template<> struct DenseMapInfo<SDValue> { + static inline SDValue getEmptyKey() { + return SDValue((SDNode*)-1, -1U); + } + static inline SDValue getTombstoneKey() { + return SDValue((SDNode*)-1, 0); + } + static unsigned getHashValue(const SDValue &Val) { + return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^ + (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo(); + } + static bool isEqual(const SDValue &LHS, const SDValue &RHS) { + return LHS == RHS; + } + static bool isPod() { return true; } +}; + +/// simplify_type specializations - Allow casting operators to work directly on +/// SDValues as if they were SDNode*'s. +template<> struct simplify_type<SDValue> { + typedef SDNode* SimpleType; + static SimpleType getSimplifiedValue(const SDValue &Val) { + return static_cast<SimpleType>(Val.getNode()); + } +}; +template<> struct simplify_type<const SDValue> { + typedef SDNode* SimpleType; + static SimpleType getSimplifiedValue(const SDValue &Val) { + return static_cast<SimpleType>(Val.getNode()); + } +}; + +/// SDUse - Represents a use of a SDNode. This class holds an SDValue, +/// which records the SDNode being used and the result number, a +/// pointer to the SDNode using the value, and Next and Prev pointers, +/// which link together all the uses of an SDNode. +/// +class SDUse { + /// Val - The value being used. + SDValue Val; + /// User - The user of this value. + SDNode *User; + /// Prev, Next - Pointers to the uses list of the SDNode referred by + /// this operand. + SDUse **Prev, *Next; + + SDUse(const SDUse &U); // Do not implement + void operator=(const SDUse &U); // Do not implement + +public: + SDUse() : Val(), User(NULL), Prev(NULL), Next(NULL) {} + + /// Normally SDUse will just implicitly convert to an SDValue that it holds. + operator const SDValue&() const { return Val; } + + /// If implicit conversion to SDValue doesn't work, the get() method returns + /// the SDValue. + const SDValue &get() const { return Val; } + + /// getUser - This returns the SDNode that contains this Use. + SDNode *getUser() { return User; } + + /// getNext - Get the next SDUse in the use list. + SDUse *getNext() const { return Next; } + + /// getNode - Convenience function for get().getNode(). 
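/* Illustrative sketch, not part of the original header: the
   DenseMapInfo<SDValue> specialization above is what allows SDValue to key an
   llvm::DenseMap directly, hashing on the node pointer plus result number.
   Assumes "llvm/ADT/DenseMap.h" is included; the counting use is made up. */
static void countValueUse(llvm::DenseMap<SDValue, unsigned> &Counts,
                          SDValue V) {
  ++Counts[V];   // distinct result numbers of the same node get distinct slots
}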
+ SDNode *getNode() const { return Val.getNode(); } + /// getResNo - Convenience function for get().getResNo(). + unsigned getResNo() const { return Val.getResNo(); } + /// getValueType - Convenience function for get().getValueType(). + MVT getValueType() const { return Val.getValueType(); } + + /// operator== - Convenience function for get().operator== + bool operator==(const SDValue &V) const { + return Val == V; + } + + /// operator!= - Convenience function for get().operator!= + bool operator!=(const SDValue &V) const { + return Val != V; + } + + /// operator< - Convenience function for get().operator< + bool operator<(const SDValue &V) const { + return Val < V; + } + +private: + friend class SelectionDAG; + friend class SDNode; + + void setUser(SDNode *p) { User = p; } + + /// set - Remove this use from its existing use list, assign it the + /// given value, and add it to the new value's node's use list. + inline void set(const SDValue &V); + /// setInitial - like set, but only supports initializing a newly-allocated + /// SDUse with a non-null value. + inline void setInitial(const SDValue &V); + /// setNode - like set, but only sets the Node portion of the value, + /// leaving the ResNo portion unmodified. + inline void setNode(SDNode *N); + + void addToList(SDUse **List) { + Next = *List; + if (Next) Next->Prev = &Next; + Prev = List; + *List = this; + } + + void removeFromList() { + *Prev = Next; + if (Next) Next->Prev = Prev; + } +}; + +/// simplify_type specializations - Allow casting operators to work directly on +/// SDValues as if they were SDNode*'s. +template<> struct simplify_type<SDUse> { + typedef SDNode* SimpleType; + static SimpleType getSimplifiedValue(const SDUse &Val) { + return static_cast<SimpleType>(Val.getNode()); + } +}; +template<> struct simplify_type<const SDUse> { + typedef SDNode* SimpleType; + static SimpleType getSimplifiedValue(const SDUse &Val) { + return static_cast<SimpleType>(Val.getNode()); + } +}; + + +/// SDNode - Represents one node in the SelectionDAG. +/// +class SDNode : public FoldingSetNode, public ilist_node<SDNode> { +private: + /// NodeType - The operation that this node performs. + /// + short NodeType; + + /// OperandsNeedDelete - This is true if OperandList was new[]'d. If true, + /// then they will be delete[]'d when the node is destroyed. + unsigned short OperandsNeedDelete : 1; + +protected: + /// SubclassData - This member is defined by this class, but is not used for + /// anything. Subclasses can use it to hold whatever state they find useful. + /// This field is initialized to zero by the ctor. + unsigned short SubclassData : 15; + +private: + /// NodeId - Unique id per SDNode in the DAG. + int NodeId; + + /// OperandList - The values that are used by this operation. + /// + SDUse *OperandList; + + /// ValueList - The types of the values this node defines. SDNode's may + /// define multiple values simultaneously. + const MVT *ValueList; + + /// UseList - List of uses for this SDNode. + SDUse *UseList; + + /// NumOperands/NumValues - The number of entries in the Operand/Value list. + unsigned short NumOperands, NumValues; + + /// debugLoc - source line information. + DebugLoc debugLoc; + + /// getValueTypeList - Return a pointer to the specified value type. 
+ static const MVT *getValueTypeList(MVT VT); + + friend class SelectionDAG; + friend struct ilist_traits<SDNode>; + +public: + //===--------------------------------------------------------------------===// + // Accessors + // + + /// getOpcode - Return the SelectionDAG opcode value for this node. For + /// pre-isel nodes (those for which isMachineOpcode returns false), these + /// are the opcode values in the ISD and <target>ISD namespaces. For + /// post-isel opcodes, see getMachineOpcode. + unsigned getOpcode() const { return (unsigned short)NodeType; } + + /// isTargetOpcode - Test if this node has a target-specific opcode (in the + /// \<target\>ISD namespace). + bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; } + + /// isMachineOpcode - Test if this node has a post-isel opcode, directly + /// corresponding to a MachineInstr opcode. + bool isMachineOpcode() const { return NodeType < 0; } + + /// getMachineOpcode - This may only be called if isMachineOpcode returns + /// true. It returns the MachineInstr opcode value that the node's opcode + /// corresponds to. + unsigned getMachineOpcode() const { + assert(isMachineOpcode() && "Not a MachineInstr opcode!"); + return ~NodeType; + } + + /// use_empty - Return true if there are no uses of this node. + /// + bool use_empty() const { return UseList == NULL; } + + /// hasOneUse - Return true if there is exactly one use of this node. + /// + bool hasOneUse() const { + return !use_empty() && next(use_begin()) == use_end(); + } + + /// use_size - Return the number of uses of this node. This method takes + /// time proportional to the number of uses. + /// + size_t use_size() const { return std::distance(use_begin(), use_end()); } + + /// getNodeId - Return the unique node id. + /// + int getNodeId() const { return NodeId; } + + /// setNodeId - Set unique node id. + void setNodeId(int Id) { NodeId = Id; } + + /// getDebugLoc - Return the source location info. + const DebugLoc getDebugLoc() const { return debugLoc; } + + /// setDebugLoc - Set source location info. Try to avoid this, putting + /// it in the constructor is preferable. + void setDebugLoc(const DebugLoc dl) { debugLoc = dl; } + + /// use_iterator - This class provides iterator support for SDUse + /// operands that use a specific SDNode. + class use_iterator + : public forward_iterator<SDUse, ptrdiff_t> { + SDUse *Op; + explicit use_iterator(SDUse *op) : Op(op) { + } + friend class SDNode; + public: + typedef forward_iterator<SDUse, ptrdiff_t>::reference reference; + typedef forward_iterator<SDUse, ptrdiff_t>::pointer pointer; + + use_iterator(const use_iterator &I) : Op(I.Op) {} + use_iterator() : Op(0) {} + + bool operator==(const use_iterator &x) const { + return Op == x.Op; + } + bool operator!=(const use_iterator &x) const { + return !operator==(x); + } + + /// atEnd - return true if this iterator is at the end of uses list. + bool atEnd() const { return Op == 0; } + + // Iterator traversal: forward iteration only. + use_iterator &operator++() { // Preincrement + assert(Op && "Cannot increment end iterator!"); + Op = Op->getNext(); + return *this; + } + + use_iterator operator++(int) { // Postincrement + use_iterator tmp = *this; ++*this; return tmp; + } + + /// Retrieve a pointer to the current user node. 
+ SDNode *operator*() const { + assert(Op && "Cannot dereference end iterator!"); + return Op->getUser(); + } + + SDNode *operator->() const { return operator*(); } + + SDUse &getUse() const { return *Op; } + + /// getOperandNo - Retrieve the operand # of this use in its user. + /// + unsigned getOperandNo() const { + assert(Op && "Cannot dereference end iterator!"); + return (unsigned)(Op - Op->getUser()->OperandList); + } + }; + + /// use_begin/use_end - Provide iteration support to walk over all uses + /// of an SDNode. + + use_iterator use_begin() const { + return use_iterator(UseList); + } + + static use_iterator use_end() { return use_iterator(0); } + + + /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the + /// indicated value. This method ignores uses of other values defined by this + /// operation. + bool hasNUsesOfValue(unsigned NUses, unsigned Value) const; + + /// hasAnyUseOfValue - Return true if there are any use of the indicated + /// value. This method ignores uses of other values defined by this operation. + bool hasAnyUseOfValue(unsigned Value) const; + + /// isOnlyUserOf - Return true if this node is the only use of N. + /// + bool isOnlyUserOf(SDNode *N) const; + + /// isOperandOf - Return true if this node is an operand of N. + /// + bool isOperandOf(SDNode *N) const; + + /// isPredecessorOf - Return true if this node is a predecessor of N. This + /// node is either an operand of N or it can be reached by recursively + /// traversing up the operands. + /// NOTE: this is an expensive method. Use it carefully. + bool isPredecessorOf(SDNode *N) const; + + /// getNumOperands - Return the number of values used by this operation. + /// + unsigned getNumOperands() const { return NumOperands; } + + /// getConstantOperandVal - Helper method returns the integer value of a + /// ConstantSDNode operand. + uint64_t getConstantOperandVal(unsigned Num) const; + + const SDValue &getOperand(unsigned Num) const { + assert(Num < NumOperands && "Invalid child # of SDNode!"); + return OperandList[Num]; + } + + typedef SDUse* op_iterator; + op_iterator op_begin() const { return OperandList; } + op_iterator op_end() const { return OperandList+NumOperands; } + + SDVTList getVTList() const { + SDVTList X = { ValueList, NumValues }; + return X; + }; + + /// getFlaggedNode - If this node has a flag operand, return the node + /// to which the flag operand points. Otherwise return NULL. + SDNode *getFlaggedNode() const { + if (getNumOperands() != 0 && + getOperand(getNumOperands()-1).getValueType() == MVT::Flag) + return getOperand(getNumOperands()-1).getNode(); + return 0; + } + + // If this is a pseudo op, like copyfromreg, look to see if there is a + // real target node flagged to it. If so, return the target node. + const SDNode *getFlaggedMachineNode() const { + const SDNode *FoundNode = this; + + // Climb up flag edges until a machine-opcode node is found, or the + // end of the chain is reached. + while (!FoundNode->isMachineOpcode()) { + const SDNode *N = FoundNode->getFlaggedNode(); + if (!N) break; + FoundNode = N; + } + + return FoundNode; + } + + /// getNumValues - Return the number of values defined/returned by this + /// operator. + /// + unsigned getNumValues() const { return NumValues; } + + /// getValueType - Return the type of a specified result. + /// + MVT getValueType(unsigned ResNo) const { + assert(ResNo < NumValues && "Illegal result number!"); + return ValueList[ResNo]; + } + + /// getValueSizeInBits - Returns MVT::getSizeInBits(getValueType(ResNo)). 
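/* Illustrative sketch, not part of the original header: walking a node's
   users with the use_iterator defined above.  Each step visits one SDUse, so
   a user that references N through several operands is seen once per operand;
   getUse()/getOperandNo() identify which operand that is. */
static bool allUsesAreCopyToReg(const SDNode *N) {
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI)
    if ((*UI)->getOpcode() != ISD::CopyToReg)
      return false;
  return true;
}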
+ /// + unsigned getValueSizeInBits(unsigned ResNo) const { + return getValueType(ResNo).getSizeInBits(); + } + + typedef const MVT* value_iterator; + value_iterator value_begin() const { return ValueList; } + value_iterator value_end() const { return ValueList+NumValues; } + + /// getOperationName - Return the opcode of this operation for printing. + /// + std::string getOperationName(const SelectionDAG *G = 0) const; + static const char* getIndexedModeName(ISD::MemIndexedMode AM); + void print_types(raw_ostream &OS, const SelectionDAG *G) const; + void print_details(raw_ostream &OS, const SelectionDAG *G) const; + void print(raw_ostream &OS, const SelectionDAG *G = 0) const; + void printr(raw_ostream &OS, const SelectionDAG *G = 0) const; + void dump() const; + void dumpr() const; + void dump(const SelectionDAG *G) const; + + static bool classof(const SDNode *) { return true; } + + /// Profile - Gather unique data for the node. + /// + void Profile(FoldingSetNodeID &ID) const; + + /// addUse - This method should only be used by the SDUse class. + /// + void addUse(SDUse &U) { U.addToList(&UseList); } + +protected: + static SDVTList getSDVTList(MVT VT) { + SDVTList Ret = { getValueTypeList(VT), 1 }; + return Ret; + } + + SDNode(unsigned Opc, const DebugLoc dl, SDVTList VTs, const SDValue *Ops, + unsigned NumOps) + : NodeType(Opc), OperandsNeedDelete(true), SubclassData(0), + NodeId(-1), + OperandList(NumOps ? new SDUse[NumOps] : 0), + ValueList(VTs.VTs), UseList(NULL), + NumOperands(NumOps), NumValues(VTs.NumVTs), + debugLoc(dl) { + for (unsigned i = 0; i != NumOps; ++i) { + OperandList[i].setUser(this); + OperandList[i].setInitial(Ops[i]); + } + } + + /// This constructor adds no operands itself; operands can be + /// set later with InitOperands. + SDNode(unsigned Opc, const DebugLoc dl, SDVTList VTs) + : NodeType(Opc), OperandsNeedDelete(false), SubclassData(0), + NodeId(-1), OperandList(0), ValueList(VTs.VTs), UseList(NULL), + NumOperands(0), NumValues(VTs.NumVTs), + debugLoc(dl) {} + + /// InitOperands - Initialize the operands list of this with 1 operand. + void InitOperands(SDUse *Ops, const SDValue &Op0) { + Ops[0].setUser(this); + Ops[0].setInitial(Op0); + NumOperands = 1; + OperandList = Ops; + } + + /// InitOperands - Initialize the operands list of this with 2 operands. + void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1) { + Ops[0].setUser(this); + Ops[0].setInitial(Op0); + Ops[1].setUser(this); + Ops[1].setInitial(Op1); + NumOperands = 2; + OperandList = Ops; + } + + /// InitOperands - Initialize the operands list of this with 3 operands. + void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1, + const SDValue &Op2) { + Ops[0].setUser(this); + Ops[0].setInitial(Op0); + Ops[1].setUser(this); + Ops[1].setInitial(Op1); + Ops[2].setUser(this); + Ops[2].setInitial(Op2); + NumOperands = 3; + OperandList = Ops; + } + + /// InitOperands - Initialize the operands list of this with 4 operands. + void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1, + const SDValue &Op2, const SDValue &Op3) { + Ops[0].setUser(this); + Ops[0].setInitial(Op0); + Ops[1].setUser(this); + Ops[1].setInitial(Op1); + Ops[2].setUser(this); + Ops[2].setInitial(Op2); + Ops[3].setUser(this); + Ops[3].setInitial(Op3); + NumOperands = 4; + OperandList = Ops; + } + + /// InitOperands - Initialize the operands list of this with N operands. 
+ void InitOperands(SDUse *Ops, const SDValue *Vals, unsigned N) { + for (unsigned i = 0; i != N; ++i) { + Ops[i].setUser(this); + Ops[i].setInitial(Vals[i]); + } + NumOperands = N; + OperandList = Ops; + } + + /// DropOperands - Release the operands and set this node to have + /// zero operands. + void DropOperands(); +}; + + +// Define inline functions from the SDValue class. + +inline unsigned SDValue::getOpcode() const { + return Node->getOpcode(); +} +inline MVT SDValue::getValueType() const { + return Node->getValueType(ResNo); +} +inline unsigned SDValue::getNumOperands() const { + return Node->getNumOperands(); +} +inline const SDValue &SDValue::getOperand(unsigned i) const { + return Node->getOperand(i); +} +inline uint64_t SDValue::getConstantOperandVal(unsigned i) const { + return Node->getConstantOperandVal(i); +} +inline bool SDValue::isTargetOpcode() const { + return Node->isTargetOpcode(); +} +inline bool SDValue::isMachineOpcode() const { + return Node->isMachineOpcode(); +} +inline unsigned SDValue::getMachineOpcode() const { + return Node->getMachineOpcode(); +} +inline bool SDValue::use_empty() const { + return !Node->hasAnyUseOfValue(ResNo); +} +inline bool SDValue::hasOneUse() const { + return Node->hasNUsesOfValue(1, ResNo); +} +inline const DebugLoc SDValue::getDebugLoc() const { + return Node->getDebugLoc(); +} + +// Define inline functions from the SDUse class. + +inline void SDUse::set(const SDValue &V) { + if (Val.getNode()) removeFromList(); + Val = V; + if (V.getNode()) V.getNode()->addUse(*this); +} + +inline void SDUse::setInitial(const SDValue &V) { + Val = V; + V.getNode()->addUse(*this); +} + +inline void SDUse::setNode(SDNode *N) { + if (Val.getNode()) removeFromList(); + Val.setNode(N); + if (N) N->addUse(*this); +} + +/// UnarySDNode - This class is used for single-operand SDNodes. This is solely +/// to allow co-allocation of node operands with the node itself. +class UnarySDNode : public SDNode { + SDUse Op; +public: + UnarySDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, SDValue X) + : SDNode(Opc, dl, VTs) { + InitOperands(&Op, X); + } +}; + +/// BinarySDNode - This class is used for two-operand SDNodes. This is solely +/// to allow co-allocation of node operands with the node itself. +class BinarySDNode : public SDNode { + SDUse Ops[2]; +public: + BinarySDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, SDValue X, SDValue Y) + : SDNode(Opc, dl, VTs) { + InitOperands(Ops, X, Y); + } +}; + +/// TernarySDNode - This class is used for three-operand SDNodes. This is solely +/// to allow co-allocation of node operands with the node itself. +class TernarySDNode : public SDNode { + SDUse Ops[3]; +public: + TernarySDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, SDValue X, SDValue Y, + SDValue Z) + : SDNode(Opc, dl, VTs) { + InitOperands(Ops, X, Y, Z); + } +}; + + +/// HandleSDNode - This class is used to form a handle around another node that +/// is persistant and is updated across invocations of replaceAllUsesWith on its +/// operand. This node should be directly created by end-users and not added to +/// the AllNodes list. +class HandleSDNode : public SDNode { + SDUse Op; +public: + // FIXME: Remove the "noinline" attribute once <rdar://problem/5852746> is + // fixed. 
+#ifdef __GNUC__ + explicit __attribute__((__noinline__)) HandleSDNode(SDValue X) +#else + explicit HandleSDNode(SDValue X) +#endif + : SDNode(ISD::HANDLENODE, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)) { + InitOperands(&Op, X); + } + ~HandleSDNode(); + const SDValue &getValue() const { return Op; } +}; + +/// Abstact virtual class for operations for memory operations +class MemSDNode : public SDNode { +private: + // MemoryVT - VT of in-memory value. + MVT MemoryVT; + + //! SrcValue - Memory location for alias analysis. + const Value *SrcValue; + + //! SVOffset - Memory location offset. Note that base is defined in MemSDNode + int SVOffset; + +public: + MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, MVT MemoryVT, + const Value *srcValue, int SVOff, + unsigned alignment, bool isvolatile); + + MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, const SDValue *Ops, + unsigned NumOps, MVT MemoryVT, const Value *srcValue, int SVOff, + unsigned alignment, bool isvolatile); + + /// Returns alignment and volatility of the memory access + unsigned getAlignment() const { return (1u << (SubclassData >> 6)) >> 1; } + bool isVolatile() const { return (SubclassData >> 5) & 1; } + + /// getRawSubclassData - Return the SubclassData value, which contains an + /// encoding of the alignment and volatile information, as well as bits + /// used by subclasses. This function should only be used to compute a + /// FoldingSetNodeID value. + unsigned getRawSubclassData() const { + return SubclassData; + } + + /// Returns the SrcValue and offset that describes the location of the access + const Value *getSrcValue() const { return SrcValue; } + int getSrcValueOffset() const { return SVOffset; } + + /// getMemoryVT - Return the type of the in-memory value. + MVT getMemoryVT() const { return MemoryVT; } + + /// getMemOperand - Return a MachineMemOperand object describing the memory + /// reference performed by operation. + MachineMemOperand getMemOperand() const; + + const SDValue &getChain() const { return getOperand(0); } + const SDValue &getBasePtr() const { + return getOperand(getOpcode() == ISD::STORE ? 2 : 1); + } + + // Methods to support isa and dyn_cast + static bool classof(const MemSDNode *) { return true; } + static bool classof(const SDNode *N) { + // For some targets, we lower some target intrinsics to a MemIntrinsicNode + // with either an intrinsic or a target opcode. + return N->getOpcode() == ISD::LOAD || + N->getOpcode() == ISD::STORE || + N->getOpcode() == ISD::ATOMIC_CMP_SWAP || + N->getOpcode() == ISD::ATOMIC_SWAP || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB || + N->getOpcode() == ISD::ATOMIC_LOAD_AND || + N->getOpcode() == ISD::ATOMIC_LOAD_OR || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || + N->getOpcode() == ISD::INTRINSIC_W_CHAIN || + N->getOpcode() == ISD::INTRINSIC_VOID || + N->isTargetOpcode(); + } +}; + +/// AtomicSDNode - A SDNode reprenting atomic operations. 
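/* Illustrative sketch, not part of the original header: because loads,
   stores, atomics and memory intrinsics all derive from MemSDNode, a single
   dyn_cast is enough to query the properties an alias-analysis or scheduling
   client cares about.  Assumes dyn_cast<> is visible
   (llvm/Support/Casting.h). */
static bool isVolatileMemoryAccess(const SDNode *N) {
  if (const MemSDNode *M = dyn_cast<MemSDNode>(N))
    return M->isVolatile();   // likewise M->getAlignment(), M->getSrcValue(),
  return false;               // M->getMemoryVT(), M->getBasePtr(), ...
}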
+///
+class AtomicSDNode : public MemSDNode {
+  SDUse Ops[4];
+
+public:
+  // Opc:    opcode for atomic
+  // VTL:    value type list
+  // Chain:  memory chain for operand
+  // Ptr:    address to update as an SDValue
+  // Cmp:    compare value
+  // Swp:    swap value
+  // SrcVal: address to update as a Value (used for MemOperand)
+  // Align:  alignment of memory
+  AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, MVT MemVT,
+               SDValue Chain, SDValue Ptr,
+               SDValue Cmp, SDValue Swp, const Value* SrcVal,
+               unsigned Align=0)
+    : MemSDNode(Opc, dl, VTL, MemVT, SrcVal, /*SVOffset=*/0,
+                Align, /*isVolatile=*/true) {
+    InitOperands(Ops, Chain, Ptr, Cmp, Swp);
+  }
+  AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, MVT MemVT,
+               SDValue Chain, SDValue Ptr,
+               SDValue Val, const Value* SrcVal, unsigned Align=0)
+    : MemSDNode(Opc, dl, VTL, MemVT, SrcVal, /*SVOffset=*/0,
+                Align, /*isVolatile=*/true) {
+    InitOperands(Ops, Chain, Ptr, Val);
+  }
+
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getVal() const { return getOperand(2); }
+
+  bool isCompareAndSwap() const {
+    unsigned Op = getOpcode();
+    return Op == ISD::ATOMIC_CMP_SWAP;
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const AtomicSDNode *) { return true; }
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+           N->getOpcode() == ISD::ATOMIC_SWAP ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
+  }
+};
+
+/// MemIntrinsicSDNode - This SDNode is used for target intrinsics that touch
+/// memory and need an associated memory operand.
+///
+class MemIntrinsicSDNode : public MemSDNode {
+  bool ReadMem;  // Intrinsic reads memory
+  bool WriteMem; // Intrinsic writes memory
+public:
+  MemIntrinsicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
+                     const SDValue *Ops, unsigned NumOps,
+                     MVT MemoryVT, const Value *srcValue, int SVO,
+                     unsigned Align, bool Vol, bool ReadMem, bool WriteMem)
+    : MemSDNode(Opc, dl, VTs, Ops, NumOps, MemoryVT, srcValue, SVO, Align, Vol),
+      ReadMem(ReadMem), WriteMem(WriteMem) {
+  }
+
+  bool readMem() const { return ReadMem; }
+  bool writeMem() const { return WriteMem; }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const MemIntrinsicSDNode *) { return true; }
+  static bool classof(const SDNode *N) {
+    // We lower some target intrinsics to their target opcode early, so a node
+    // with a target opcode can be of this class.
+    return N->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+           N->getOpcode() == ISD::INTRINSIC_VOID ||
+           N->isTargetOpcode();
+  }
+};
+
+/// ShuffleVectorSDNode - This SDNode is used to implement the code generator
+/// support for the LLVM IR shufflevector instruction. It combines elements
+/// from two input vectors into a new vector, with the selection and ordering
+/// of elements determined by an array of integers, referred to as the shuffle
+/// mask. For input vectors of width N, mask indices of 0..N-1 refer to
+/// elements from the LHS input, and indices from N to 2N-1 refer to elements
+/// from the RHS. An index of -1 is treated as undef, such that the code
+/// generator may put any value in the corresponding element of the result.
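+///
+/// For example (an illustrative sketch, not part of the original comment),
+/// with two <4 x i32> inputs the mask {0, 4, 1, 5} interleaves the low
+/// halves: elements 0 and 1 come from the LHS, mask values 4 and 5 select
+/// elements 0 and 1 of the RHS, and a -1 entry would leave that lane undef.
+/// In code, the mask is read back through the accessors below:
+///
+///   SmallVector<int, 8> Mask;
+///   cast<ShuffleVectorSDNode>(N)->getMask(Mask);   // copy out the mask
+///   int First = cast<ShuffleVectorSDNode>(N)->getMaskElt(0);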
+class ShuffleVectorSDNode : public SDNode { + SDUse Ops[2]; + + // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and + // is freed when the SelectionDAG object is destroyed. + const int *Mask; +protected: + friend class SelectionDAG; + ShuffleVectorSDNode(MVT VT, DebugLoc dl, SDValue N1, SDValue N2, + const int *M) + : SDNode(ISD::VECTOR_SHUFFLE, dl, getSDVTList(VT)), Mask(M) { + InitOperands(Ops, N1, N2); + } +public: + + void getMask(SmallVectorImpl<int> &M) const { + MVT VT = getValueType(0); + M.clear(); + for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) + M.push_back(Mask[i]); + } + int getMaskElt(unsigned Idx) const { + assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!"); + return Mask[Idx]; + } + + bool isSplat() const { return isSplatMask(Mask, getValueType(0)); } + int getSplatIndex() const { + assert(isSplat() && "Cannot get splat index for non-splat!"); + return Mask[0]; + } + static bool isSplatMask(const int *Mask, MVT VT); + + static bool classof(const ShuffleVectorSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::VECTOR_SHUFFLE; + } +}; + +class ConstantSDNode : public SDNode { + const ConstantInt *Value; + friend class SelectionDAG; + ConstantSDNode(bool isTarget, const ConstantInt *val, MVT VT) + : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, + DebugLoc::getUnknownLoc(), getSDVTList(VT)), Value(val) { + } +public: + + const ConstantInt *getConstantIntValue() const { return Value; } + const APInt &getAPIntValue() const { return Value->getValue(); } + uint64_t getZExtValue() const { return Value->getZExtValue(); } + int64_t getSExtValue() const { return Value->getSExtValue(); } + + bool isNullValue() const { return Value->isNullValue(); } + bool isAllOnesValue() const { return Value->isAllOnesValue(); } + + static bool classof(const ConstantSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::Constant || + N->getOpcode() == ISD::TargetConstant; + } +}; + +class ConstantFPSDNode : public SDNode { + const ConstantFP *Value; + friend class SelectionDAG; + ConstantFPSDNode(bool isTarget, const ConstantFP *val, MVT VT) + : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, + DebugLoc::getUnknownLoc(), getSDVTList(VT)), Value(val) { + } +public: + + const APFloat& getValueAPF() const { return Value->getValueAPF(); } + const ConstantFP *getConstantFPValue() const { return Value; } + + /// isExactlyValue - We don't rely on operator== working on double values, as + /// it returns true for things that are clearly not equal, like -0.0 and 0.0. + /// As such, this method can be used to do an exact bit-for-bit comparison of + /// two floating point values. + + /// We leave the version with the double argument here because it's just so + /// convenient to write "2.0" and the like. Without this function we'd + /// have to duplicate its logic everywhere it's called. 
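+  ///
+  /// A minimal usage sketch (illustrative only; N0 and N1 are assumed to be
+  /// the operands of an FMUL being combined):
+  ///
+  ///   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()))
+  ///     if (CFP->isExactlyValue(1.0))
+  ///       return N0;   // fold (fmul N0, 1.0) -> N0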
+ bool isExactlyValue(double V) const { + bool ignored; + // convert is not supported on this type + if (&Value->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble) + return false; + APFloat Tmp(V); + Tmp.convert(Value->getValueAPF().getSemantics(), + APFloat::rmNearestTiesToEven, &ignored); + return isExactlyValue(Tmp); + } + bool isExactlyValue(const APFloat& V) const; + + bool isValueValidForType(MVT VT, const APFloat& Val); + + static bool classof(const ConstantFPSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::ConstantFP || + N->getOpcode() == ISD::TargetConstantFP; + } +}; + +class GlobalAddressSDNode : public SDNode { + GlobalValue *TheGlobal; + int64_t Offset; + friend class SelectionDAG; + GlobalAddressSDNode(bool isTarget, const GlobalValue *GA, MVT VT, + int64_t o = 0); +public: + + GlobalValue *getGlobal() const { return TheGlobal; } + int64_t getOffset() const { return Offset; } + // Return the address space this GlobalAddress belongs to. + unsigned getAddressSpace() const; + + static bool classof(const GlobalAddressSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::GlobalAddress || + N->getOpcode() == ISD::TargetGlobalAddress || + N->getOpcode() == ISD::GlobalTLSAddress || + N->getOpcode() == ISD::TargetGlobalTLSAddress; + } +}; + +class FrameIndexSDNode : public SDNode { + int FI; + friend class SelectionDAG; + FrameIndexSDNode(int fi, MVT VT, bool isTarg) + : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex, + DebugLoc::getUnknownLoc(), getSDVTList(VT)), FI(fi) { + } +public: + + int getIndex() const { return FI; } + + static bool classof(const FrameIndexSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::FrameIndex || + N->getOpcode() == ISD::TargetFrameIndex; + } +}; + +class JumpTableSDNode : public SDNode { + int JTI; + friend class SelectionDAG; + JumpTableSDNode(int jti, MVT VT, bool isTarg) + : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable, + DebugLoc::getUnknownLoc(), getSDVTList(VT)), JTI(jti) { + } +public: + + int getIndex() const { return JTI; } + + static bool classof(const JumpTableSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::JumpTable || + N->getOpcode() == ISD::TargetJumpTable; + } +}; + +class ConstantPoolSDNode : public SDNode { + union { + Constant *ConstVal; + MachineConstantPoolValue *MachineCPVal; + } Val; + int Offset; // It's a MachineConstantPoolValue if top bit is set. + unsigned Alignment; // Minimum alignment requirement of CP (not log2 value). + friend class SelectionDAG; + ConstantPoolSDNode(bool isTarget, Constant *c, MVT VT, int o=0) + : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, + DebugLoc::getUnknownLoc(), + getSDVTList(VT)), Offset(o), Alignment(0) { + assert((int)Offset >= 0 && "Offset is too large"); + Val.ConstVal = c; + } + ConstantPoolSDNode(bool isTarget, Constant *c, MVT VT, int o, unsigned Align) + : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, + DebugLoc::getUnknownLoc(), + getSDVTList(VT)), Offset(o), Alignment(Align) { + assert((int)Offset >= 0 && "Offset is too large"); + Val.ConstVal = c; + } + ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, + MVT VT, int o=0) + : SDNode(isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool, + DebugLoc::getUnknownLoc(), + getSDVTList(VT)), Offset(o), Alignment(0) { + assert((int)Offset >= 0 && "Offset is too large"); + Val.MachineCPVal = v; + Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1); + } + ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, + MVT VT, int o, unsigned Align) + : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, + DebugLoc::getUnknownLoc(), + getSDVTList(VT)), Offset(o), Alignment(Align) { + assert((int)Offset >= 0 && "Offset is too large"); + Val.MachineCPVal = v; + Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1); + } +public: + + bool isMachineConstantPoolEntry() const { + return (int)Offset < 0; + } + + Constant *getConstVal() const { + assert(!isMachineConstantPoolEntry() && "Wrong constantpool type"); + return Val.ConstVal; + } + + MachineConstantPoolValue *getMachineCPVal() const { + assert(isMachineConstantPoolEntry() && "Wrong constantpool type"); + return Val.MachineCPVal; + } + + int getOffset() const { + return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1)); + } + + // Return the alignment of this constant pool object, which is either 0 (for + // default alignment) or the desired value. + unsigned getAlignment() const { return Alignment; } + + const Type *getType() const; + + static bool classof(const ConstantPoolSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::ConstantPool || + N->getOpcode() == ISD::TargetConstantPool; + } +}; + +class BasicBlockSDNode : public SDNode { + MachineBasicBlock *MBB; + friend class SelectionDAG; + /// Debug info is meaningful and potentially useful here, but we create + /// blocks out of order when they're jumped to, which makes it a bit + /// harder. Let's see if we need it first. + explicit BasicBlockSDNode(MachineBasicBlock *mbb) + : SDNode(ISD::BasicBlock, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), MBB(mbb) { + } +public: + + MachineBasicBlock *getBasicBlock() const { return MBB; } + + static bool classof(const BasicBlockSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::BasicBlock; + } +}; + +/// BuildVectorSDNode - A "pseudo-class" with methods for operating on +/// BUILD_VECTORs. +class BuildVectorSDNode : public SDNode { + // These are constructed as SDNodes and then cast to BuildVectorSDNodes. + explicit BuildVectorSDNode(); // Do not implement +public: + /// isConstantSplat - Check if this is a constant splat, and if so, find the + /// smallest element size that splats the vector. If MinSplatBits is + /// nonzero, the element size must be at least that large. Note that the + /// splat element may be the entire vector (i.e., a one element vector). + /// Returns the splat element value in SplatValue. Any undefined bits in + /// that value are zero, and the corresponding bits in the SplatUndef mask + /// are set. The SplatBitSize value is set to the splat element size in + /// bits. HasAnyUndefs is set to true if any bits in the vector are + /// undefined. + bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, + unsigned &SplatBitSize, bool &HasAnyUndefs, + unsigned MinSplatBits = 0); + + static inline bool classof(const BuildVectorSDNode *) { return true; } + static inline bool classof(const SDNode *N) { + return N->getOpcode() == ISD::BUILD_VECTOR; + } +}; + +/// SrcValueSDNode - An SDNode that holds an arbitrary LLVM IR Value. 
This is +/// used when the SelectionDAG needs to make a simple reference to something +/// in the LLVM IR representation. +/// +/// Note that this is not used for carrying alias information; that is done +/// with MemOperandSDNode, which includes a Value which is required to be a +/// pointer, and several other fields specific to memory references. +/// +class SrcValueSDNode : public SDNode { + const Value *V; + friend class SelectionDAG; + /// Create a SrcValue for a general value. + explicit SrcValueSDNode(const Value *v) + : SDNode(ISD::SRCVALUE, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), V(v) {} + +public: + /// getValue - return the contained Value. + const Value *getValue() const { return V; } + + static bool classof(const SrcValueSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::SRCVALUE; + } +}; + + +/// MemOperandSDNode - An SDNode that holds a MachineMemOperand. This is +/// used to represent a reference to memory after ISD::LOAD +/// and ISD::STORE have been lowered. +/// +class MemOperandSDNode : public SDNode { + friend class SelectionDAG; + /// Create a MachineMemOperand node + explicit MemOperandSDNode(const MachineMemOperand &mo) + : SDNode(ISD::MEMOPERAND, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), MO(mo) {} + +public: + /// MO - The contained MachineMemOperand. + const MachineMemOperand MO; + + static bool classof(const MemOperandSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::MEMOPERAND; + } +}; + + +class RegisterSDNode : public SDNode { + unsigned Reg; + friend class SelectionDAG; + RegisterSDNode(unsigned reg, MVT VT) + : SDNode(ISD::Register, DebugLoc::getUnknownLoc(), + getSDVTList(VT)), Reg(reg) { + } +public: + + unsigned getReg() const { return Reg; } + + static bool classof(const RegisterSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::Register; + } +}; + +class DbgStopPointSDNode : public SDNode { + SDUse Chain; + unsigned Line; + unsigned Column; + Value *CU; + friend class SelectionDAG; + DbgStopPointSDNode(SDValue ch, unsigned l, unsigned c, + Value *cu) + : SDNode(ISD::DBG_STOPPOINT, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), Line(l), Column(c), CU(cu) { + InitOperands(&Chain, ch); + } +public: + unsigned getLine() const { return Line; } + unsigned getColumn() const { return Column; } + Value *getCompileUnit() const { return CU; } + + static bool classof(const DbgStopPointSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::DBG_STOPPOINT; + } +}; + +class LabelSDNode : public SDNode { + SDUse Chain; + unsigned LabelID; + friend class SelectionDAG; +LabelSDNode(unsigned NodeTy, DebugLoc dl, SDValue ch, unsigned id) + : SDNode(NodeTy, dl, getSDVTList(MVT::Other)), LabelID(id) { + InitOperands(&Chain, ch); + } +public: + unsigned getLabelID() const { return LabelID; } + + static bool classof(const LabelSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::DBG_LABEL || + N->getOpcode() == ISD::EH_LABEL; + } +}; + +class ExternalSymbolSDNode : public SDNode { + const char *Symbol; + friend class SelectionDAG; + ExternalSymbolSDNode(bool isTarget, const char *Sym, MVT VT) + : SDNode(isTarget ? 
ISD::TargetExternalSymbol : ISD::ExternalSymbol, + DebugLoc::getUnknownLoc(), + getSDVTList(VT)), Symbol(Sym) { + } +public: + + const char *getSymbol() const { return Symbol; } + + static bool classof(const ExternalSymbolSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::ExternalSymbol || + N->getOpcode() == ISD::TargetExternalSymbol; + } +}; + +class CondCodeSDNode : public SDNode { + ISD::CondCode Condition; + friend class SelectionDAG; + explicit CondCodeSDNode(ISD::CondCode Cond) + : SDNode(ISD::CONDCODE, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), Condition(Cond) { + } +public: + + ISD::CondCode get() const { return Condition; } + + static bool classof(const CondCodeSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::CONDCODE; + } +}; + +/// CvtRndSatSDNode - NOTE: avoid using this node as this may disappear in the +/// future and most targets don't support it. +class CvtRndSatSDNode : public SDNode { + ISD::CvtCode CvtCode; + friend class SelectionDAG; + explicit CvtRndSatSDNode(MVT VT, DebugLoc dl, const SDValue *Ops, + unsigned NumOps, ISD::CvtCode Code) + : SDNode(ISD::CONVERT_RNDSAT, dl, getSDVTList(VT), Ops, NumOps), + CvtCode(Code) { + assert(NumOps == 5 && "wrong number of operations"); + } +public: + ISD::CvtCode getCvtCode() const { return CvtCode; } + + static bool classof(const CvtRndSatSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::CONVERT_RNDSAT; + } +}; + +namespace ISD { + struct ArgFlagsTy { + private: + static const uint64_t NoFlagSet = 0ULL; + static const uint64_t ZExt = 1ULL<<0; ///< Zero extended + static const uint64_t ZExtOffs = 0; + static const uint64_t SExt = 1ULL<<1; ///< Sign extended + static const uint64_t SExtOffs = 1; + static const uint64_t InReg = 1ULL<<2; ///< Passed in register + static const uint64_t InRegOffs = 2; + static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr + static const uint64_t SRetOffs = 3; + static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value + static const uint64_t ByValOffs = 4; + static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain + static const uint64_t NestOffs = 5; + static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment + static const uint64_t ByValAlignOffs = 6; + static const uint64_t Split = 1ULL << 10; + static const uint64_t SplitOffs = 10; + static const uint64_t OrigAlign = 0x1FULL<<27; + static const uint64_t OrigAlignOffs = 27; + static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size + static const uint64_t ByValSizeOffs = 32; + + static const uint64_t One = 1ULL; //< 1 of this type, for shifts + + uint64_t Flags; + public: + ArgFlagsTy() : Flags(0) { } + + bool isZExt() const { return Flags & ZExt; } + void setZExt() { Flags |= One << ZExtOffs; } + + bool isSExt() const { return Flags & SExt; } + void setSExt() { Flags |= One << SExtOffs; } + + bool isInReg() const { return Flags & InReg; } + void setInReg() { Flags |= One << InRegOffs; } + + bool isSRet() const { return Flags & SRet; } + void setSRet() { Flags |= One << SRetOffs; } + + bool isByVal() const { return Flags & ByVal; } + void setByVal() { Flags |= One << ByValOffs; } + + bool isNest() const { return Flags & Nest; } + void setNest() { Flags |= One << NestOffs; } + + unsigned getByValAlign() const { + return (unsigned) + ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2); + } + void setByValAlign(unsigned A) { + Flags 
= (Flags & ~ByValAlign) | + (uint64_t(Log2_32(A) + 1) << ByValAlignOffs); + } + + bool isSplit() const { return Flags & Split; } + void setSplit() { Flags |= One << SplitOffs; } + + unsigned getOrigAlign() const { + return (unsigned) + ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2); + } + void setOrigAlign(unsigned A) { + Flags = (Flags & ~OrigAlign) | + (uint64_t(Log2_32(A) + 1) << OrigAlignOffs); + } + + unsigned getByValSize() const { + return (unsigned)((Flags & ByValSize) >> ByValSizeOffs); + } + void setByValSize(unsigned S) { + Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs); + } + + /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4". + std::string getArgFlagsString(); + + /// getRawBits - Represent the flags as a bunch of bits. + uint64_t getRawBits() const { return Flags; } + }; +} + +/// ARG_FLAGSSDNode - Leaf node holding parameter flags. +class ARG_FLAGSSDNode : public SDNode { + ISD::ArgFlagsTy TheFlags; + friend class SelectionDAG; + explicit ARG_FLAGSSDNode(ISD::ArgFlagsTy Flags) + : SDNode(ISD::ARG_FLAGS, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), TheFlags(Flags) { + } +public: + ISD::ArgFlagsTy getArgFlags() const { return TheFlags; } + + static bool classof(const ARG_FLAGSSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::ARG_FLAGS; + } +}; + +/// CallSDNode - Node for calls -- ISD::CALL. +class CallSDNode : public SDNode { + unsigned CallingConv; + bool IsVarArg; + bool IsTailCall; + // We might eventually want a full-blown Attributes for the result; that + // will expand the size of the representation. At the moment we only + // need Inreg. + bool Inreg; + friend class SelectionDAG; + CallSDNode(unsigned cc, DebugLoc dl, bool isvararg, bool istailcall, + bool isinreg, SDVTList VTs, const SDValue *Operands, + unsigned numOperands) + : SDNode(ISD::CALL, dl, VTs, Operands, numOperands), + CallingConv(cc), IsVarArg(isvararg), IsTailCall(istailcall), + Inreg(isinreg) {} +public: + unsigned getCallingConv() const { return CallingConv; } + unsigned isVarArg() const { return IsVarArg; } + unsigned isTailCall() const { return IsTailCall; } + unsigned isInreg() const { return Inreg; } + + /// Set this call to not be marked as a tail call. Normally setter + /// methods in SDNodes are unsafe because it breaks the CSE map, + /// but we don't include the tail call flag for calls so it's ok + /// in this case. + void setNotTailCall() { IsTailCall = false; } + + SDValue getChain() const { return getOperand(0); } + SDValue getCallee() const { return getOperand(1); } + + unsigned getNumArgs() const { return (getNumOperands() - 2) / 2; } + SDValue getArg(unsigned i) const { return getOperand(2+2*i); } + SDValue getArgFlagsVal(unsigned i) const { + return getOperand(3+2*i); + } + ISD::ArgFlagsTy getArgFlags(unsigned i) const { + return cast<ARG_FLAGSSDNode>(getArgFlagsVal(i).getNode())->getArgFlags(); + } + + unsigned getNumRetVals() const { return getNumValues() - 1; } + MVT getRetValType(unsigned i) const { return getValueType(i); } + + static bool classof(const CallSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::CALL; + } +}; + +/// VTSDNode - This class is used to represent MVT's, which are used +/// to parameterize some operations. 
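+///
+/// For example (illustrative, not part of the original comment), a
+/// SIGN_EXTEND_INREG node names the type it extends from with a VT operand:
+///
+///   // (sign_extend_inreg x, i8): operand 1 is a VTSDNode holding MVT::i8
+///   MVT ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();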
+class VTSDNode : public SDNode { + MVT ValueType; + friend class SelectionDAG; + explicit VTSDNode(MVT VT) + : SDNode(ISD::VALUETYPE, DebugLoc::getUnknownLoc(), + getSDVTList(MVT::Other)), ValueType(VT) { + } +public: + + MVT getVT() const { return ValueType; } + + static bool classof(const VTSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::VALUETYPE; + } +}; + +/// LSBaseSDNode - Base class for LoadSDNode and StoreSDNode +/// +class LSBaseSDNode : public MemSDNode { + //! Operand array for load and store + /*! + \note Moving this array to the base class captures more + common functionality shared between LoadSDNode and + StoreSDNode + */ + SDUse Ops[4]; +public: + LSBaseSDNode(ISD::NodeType NodeTy, DebugLoc dl, SDValue *Operands, + unsigned numOperands, SDVTList VTs, ISD::MemIndexedMode AM, + MVT VT, const Value *SV, int SVO, unsigned Align, bool Vol) + : MemSDNode(NodeTy, dl, VTs, VT, SV, SVO, Align, Vol) { + assert(Align != 0 && "Loads and stores should have non-zero aligment"); + SubclassData |= AM << 2; + assert(getAddressingMode() == AM && "MemIndexedMode encoding error!"); + InitOperands(Ops, Operands, numOperands); + assert((getOffset().getOpcode() == ISD::UNDEF || isIndexed()) && + "Only indexed loads and stores have a non-undef offset operand"); + } + + const SDValue &getOffset() const { + return getOperand(getOpcode() == ISD::LOAD ? 2 : 3); + } + + /// getAddressingMode - Return the addressing mode for this load or store: + /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. + ISD::MemIndexedMode getAddressingMode() const { + return ISD::MemIndexedMode((SubclassData >> 2) & 7); + } + + /// isIndexed - Return true if this is a pre/post inc/dec load/store. + bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } + + /// isUnindexed - Return true if this is NOT a pre/post inc/dec load/store. + bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } + + static bool classof(const LSBaseSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::LOAD || + N->getOpcode() == ISD::STORE; + } +}; + +/// LoadSDNode - This class is used to represent ISD::LOAD nodes. +/// +class LoadSDNode : public LSBaseSDNode { + friend class SelectionDAG; + LoadSDNode(SDValue *ChainPtrOff, DebugLoc dl, SDVTList VTs, + ISD::MemIndexedMode AM, ISD::LoadExtType ETy, MVT LVT, + const Value *SV, int O=0, unsigned Align=0, bool Vol=false) + : LSBaseSDNode(ISD::LOAD, dl, ChainPtrOff, 3, + VTs, AM, LVT, SV, O, Align, Vol) { + SubclassData |= (unsigned short)ETy; + assert(getExtensionType() == ETy && "LoadExtType encoding error!"); + } +public: + + /// getExtensionType - Return whether this is a plain node, + /// or one of the varieties of value-extending loads. + ISD::LoadExtType getExtensionType() const { + return ISD::LoadExtType(SubclassData & 3); + } + + const SDValue &getBasePtr() const { return getOperand(1); } + const SDValue &getOffset() const { return getOperand(2); } + + static bool classof(const LoadSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::LOAD; + } +}; + +/// StoreSDNode - This class is used to represent ISD::STORE nodes. 
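+/// For illustration (a sketch, not part of the original comment), a store's
+/// operands are laid out as (Chain, Value, BasePtr, Offset), where Offset is
+/// UNDEF unless the store is indexed:
+///
+///   StoreSDNode *ST = cast<StoreSDNode>(N);
+///   SDValue Stored = ST->getValue();      // operand 1
+///   SDValue Addr   = ST->getBasePtr();    // operand 2
+///   bool Narrowed  = ST->isTruncatingStore();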
+/// +class StoreSDNode : public LSBaseSDNode { + friend class SelectionDAG; + StoreSDNode(SDValue *ChainValuePtrOff, DebugLoc dl, SDVTList VTs, + ISD::MemIndexedMode AM, bool isTrunc, MVT SVT, + const Value *SV, int O=0, unsigned Align=0, bool Vol=false) + : LSBaseSDNode(ISD::STORE, dl, ChainValuePtrOff, 4, + VTs, AM, SVT, SV, O, Align, Vol) { + SubclassData |= (unsigned short)isTrunc; + assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!"); + } +public: + + /// isTruncatingStore - Return true if the op does a truncation before store. + /// For integers this is the same as doing a TRUNCATE and storing the result. + /// For floats, it is the same as doing an FP_ROUND and storing the result. + bool isTruncatingStore() const { return SubclassData & 1; } + + const SDValue &getValue() const { return getOperand(1); } + const SDValue &getBasePtr() const { return getOperand(2); } + const SDValue &getOffset() const { return getOperand(3); } + + static bool classof(const StoreSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::STORE; + } +}; + + +class SDNodeIterator : public forward_iterator<SDNode, ptrdiff_t> { + SDNode *Node; + unsigned Operand; + + SDNodeIterator(SDNode *N, unsigned Op) : Node(N), Operand(Op) {} +public: + bool operator==(const SDNodeIterator& x) const { + return Operand == x.Operand; + } + bool operator!=(const SDNodeIterator& x) const { return !operator==(x); } + + const SDNodeIterator &operator=(const SDNodeIterator &I) { + assert(I.Node == Node && "Cannot assign iterators to two different nodes!"); + Operand = I.Operand; + return *this; + } + + pointer operator*() const { + return Node->getOperand(Operand).getNode(); + } + pointer operator->() const { return operator*(); } + + SDNodeIterator& operator++() { // Preincrement + ++Operand; + return *this; + } + SDNodeIterator operator++(int) { // Postincrement + SDNodeIterator tmp = *this; ++*this; return tmp; + } + + static SDNodeIterator begin(SDNode *N) { return SDNodeIterator(N, 0); } + static SDNodeIterator end (SDNode *N) { + return SDNodeIterator(N, N->getNumOperands()); + } + + unsigned getOperand() const { return Operand; } + const SDNode *getNode() const { return Node; } +}; + +template <> struct GraphTraits<SDNode*> { + typedef SDNode NodeType; + typedef SDNodeIterator ChildIteratorType; + static inline NodeType *getEntryNode(SDNode *N) { return N; } + static inline ChildIteratorType child_begin(NodeType *N) { + return SDNodeIterator::begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return SDNodeIterator::end(N); + } +}; + +/// LargestSDNode - The largest SDNode class. +/// +typedef LoadSDNode LargestSDNode; + +/// MostAlignedSDNode - The SDNode class with the greatest alignment +/// requirement. +/// +typedef ARG_FLAGSSDNode MostAlignedSDNode; + +namespace ISD { + /// isNormalLoad - Returns true if the specified node is a non-extending + /// and unindexed load. + inline bool isNormalLoad(const SDNode *N) { + const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N); + return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD && + Ld->getAddressingMode() == ISD::UNINDEXED; + } + + /// isNON_EXTLoad - Returns true if the specified node is a non-extending + /// load. + inline bool isNON_EXTLoad(const SDNode *N) { + return isa<LoadSDNode>(N) && + cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD; + } + + /// isEXTLoad - Returns true if the specified node is a EXTLOAD. 
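+  /// (An EXTLOAD reads a narrower type from memory and leaves the extra high
+  /// bits unspecified; SEXTLOAD and ZEXTLOAD below define them by sign or
+  /// zero extension.)  A typical guard in a DAG combine, as an illustrative
+  /// sketch:
+  ///
+  ///   if (ISD::isEXTLoad(Op.getNode()) && Op.hasOneUse())
+  ///     ...  // candidate for folding the extension into the load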
+ /// + inline bool isEXTLoad(const SDNode *N) { + return isa<LoadSDNode>(N) && + cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD; + } + + /// isSEXTLoad - Returns true if the specified node is a SEXTLOAD. + /// + inline bool isSEXTLoad(const SDNode *N) { + return isa<LoadSDNode>(N) && + cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD; + } + + /// isZEXTLoad - Returns true if the specified node is a ZEXTLOAD. + /// + inline bool isZEXTLoad(const SDNode *N) { + return isa<LoadSDNode>(N) && + cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD; + } + + /// isUNINDEXEDLoad - Returns true if the specified node is an unindexed load. + /// + inline bool isUNINDEXEDLoad(const SDNode *N) { + return isa<LoadSDNode>(N) && + cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; + } + + /// isNormalStore - Returns true if the specified node is a non-truncating + /// and unindexed store. + inline bool isNormalStore(const SDNode *N) { + const StoreSDNode *St = dyn_cast<StoreSDNode>(N); + return St && !St->isTruncatingStore() && + St->getAddressingMode() == ISD::UNINDEXED; + } + + /// isNON_TRUNCStore - Returns true if the specified node is a non-truncating + /// store. + inline bool isNON_TRUNCStore(const SDNode *N) { + return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore(); + } + + /// isTRUNCStore - Returns true if the specified node is a truncating + /// store. + inline bool isTRUNCStore(const SDNode *N) { + return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore(); + } + + /// isUNINDEXEDStore - Returns true if the specified node is an + /// unindexed store. + inline bool isUNINDEXEDStore(const SDNode *N) { + return isa<StoreSDNode>(N) && + cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; + } +} + + +} // end llvm namespace + +#endif diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h new file mode 100644 index 0000000..95c3a11 --- /dev/null +++ b/include/llvm/CodeGen/ValueTypes.h @@ -0,0 +1,481 @@ +//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the set of low-level target independent types which various +// values in the code generator are. This allows the target specific behavior +// of instructions to be described to target independent passes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_VALUETYPES_H +#define LLVM_CODEGEN_VALUETYPES_H + +#include <cassert> +#include <string> +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/MathExtras.h" + +namespace llvm { + class Type; + + struct MVT { // MVT = Machine Value Type + public: + enum SimpleValueType { + // If you change this numbering, you must change the values in + // ValueTypes.td as well! 
+ Other = 0, // This is a non-standard value + i1 = 1, // This is a 1 bit integer value + i8 = 2, // This is an 8 bit integer value + i16 = 3, // This is a 16 bit integer value + i32 = 4, // This is a 32 bit integer value + i64 = 5, // This is a 64 bit integer value + i128 = 6, // This is a 128 bit integer value + + FIRST_INTEGER_VALUETYPE = i1, + LAST_INTEGER_VALUETYPE = i128, + + f32 = 7, // This is a 32 bit floating point value + f64 = 8, // This is a 64 bit floating point value + f80 = 9, // This is a 80 bit floating point value + f128 = 10, // This is a 128 bit floating point value + ppcf128 = 11, // This is a PPC 128-bit floating point value + Flag = 12, // This is a condition code or machine flag. + + isVoid = 13, // This has no value + + v2i8 = 14, // 2 x i8 + v4i8 = 15, // 4 x i8 + v2i16 = 16, // 2 x i16 + v8i8 = 17, // 8 x i8 + v4i16 = 18, // 4 x i16 + v2i32 = 19, // 2 x i32 + v1i64 = 20, // 1 x i64 + v16i8 = 21, // 16 x i8 + v8i16 = 22, // 8 x i16 + v3i32 = 23, // 3 x i32 + v4i32 = 24, // 4 x i32 + v2i64 = 25, // 2 x i64 + + v2f32 = 26, // 2 x f32 + v3f32 = 27, // 3 x f32 + v4f32 = 28, // 4 x f32 + v2f64 = 29, // 2 x f64 + + FIRST_VECTOR_VALUETYPE = v2i8, + LAST_VECTOR_VALUETYPE = v2f64, + + LAST_VALUETYPE = 30, // This always remains at the end of the list. + + // iPTRAny - An int value the size of the pointer of the current + // target to any address space. This must only be used internal to + // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR. + iPTRAny = 252, + + // fAny - Any floating-point or vector floating-point value. This is used + // for intrinsics that have overloadings based on floating-point types. + // This is only for tblgen's consumption! + fAny = 253, + + // iAny - An integer or vector integer value of any bit width. This is + // used for intrinsics that have overloadings based on integer bit widths. + // This is only for tblgen's consumption! + iAny = 254, + + // iPTR - An int value the size of the pointer of the current + // target. This should only be used internal to tblgen! + iPTR = 255, + + // LastSimpleValueType - The greatest valid SimpleValueType value. + LastSimpleValueType = 255 + }; + + private: + /// This union holds low-level value types. Valid values include any of + /// the values in the SimpleValueType enum, or any value returned from one + /// of the MVT methods. Any value type equal to one of the SimpleValueType + /// enum values is a "simple" value type. All others are "extended". + /// + /// Note that simple doesn't necessary mean legal for the target machine. + /// All legal value types must be simple, but often there are some simple + /// value types that are not legal. + /// + union { + uintptr_t V; + const Type *LLVMTy; + }; + + public: + MVT() {} + MVT(SimpleValueType S) : V(S) {} + + bool operator==(const MVT VT) const { + return getRawBits() == VT.getRawBits(); + } + bool operator!=(const MVT VT) const { + return getRawBits() != VT.getRawBits(); + } + + /// getFloatingPointVT - Returns the MVT that represents a floating point + /// type with the given number of bits. There are two floating point types + /// with 128 bits - this returns f128 rather than ppcf128. + static MVT getFloatingPointVT(unsigned BitWidth) { + switch (BitWidth) { + default: + assert(false && "Bad bit width!"); + case 32: + return f32; + case 64: + return f64; + case 80: + return f80; + case 128: + return f128; + } + } + + /// getIntegerVT - Returns the MVT that represents an integer with the given + /// number of bits. 
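+    ///
+    /// For example (illustrative only), a width with a simple equivalent
+    /// maps onto it, while any other width falls through to an extended type:
+    ///
+    ///   MVT::getIntegerVT(32) == MVT::i32           // simple value type
+    ///   MVT::getIntegerVT(24).isSimple()            // false: extended type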
+ static MVT getIntegerVT(unsigned BitWidth) { + switch (BitWidth) { + default: + break; + case 1: + return i1; + case 8: + return i8; + case 16: + return i16; + case 32: + return i32; + case 64: + return i64; + case 128: + return i128; + } + return getExtendedIntegerVT(BitWidth); + } + + /// getVectorVT - Returns the MVT that represents a vector NumElements in + /// length, where each element is of type VT. + static MVT getVectorVT(MVT VT, unsigned NumElements) { + switch (VT.V) { + default: + break; + case i8: + if (NumElements == 2) return v2i8; + if (NumElements == 4) return v4i8; + if (NumElements == 8) return v8i8; + if (NumElements == 16) return v16i8; + break; + case i16: + if (NumElements == 2) return v2i16; + if (NumElements == 4) return v4i16; + if (NumElements == 8) return v8i16; + break; + case i32: + if (NumElements == 2) return v2i32; + if (NumElements == 3) return v3i32; + if (NumElements == 4) return v4i32; + break; + case i64: + if (NumElements == 1) return v1i64; + if (NumElements == 2) return v2i64; + break; + case f32: + if (NumElements == 2) return v2f32; + if (NumElements == 3) return v3f32; + if (NumElements == 4) return v4f32; + break; + case f64: + if (NumElements == 2) return v2f64; + break; + } + return getExtendedVectorVT(VT, NumElements); + } + + /// getIntVectorWithNumElements - Return any integer vector type that has + /// the specified number of elements. + static MVT getIntVectorWithNumElements(unsigned NumElts) { + switch (NumElts) { + default: return getVectorVT(i8, NumElts); + case 1: return v1i64; + case 2: return v2i32; + case 3: return v3i32; + case 4: return v4i16; + case 8: return v8i8; + case 16: return v16i8; + } + } + + /// isSimple - Test if the given MVT is simple (as opposed to being + /// extended). + bool isSimple() const { + return V <= LastSimpleValueType; + } + + /// isExtended - Test if the given MVT is extended (as opposed to + /// being simple). + bool isExtended() const { + return !isSimple(); + } + + /// isFloatingPoint - Return true if this is a FP, or a vector FP type. + bool isFloatingPoint() const { + return isSimple() ? + ((V >= f32 && V <= ppcf128) || (V >= v2f32 && V <= v2f64)) : + isExtendedFloatingPoint(); + } + + /// isInteger - Return true if this is an integer, or a vector integer type. + bool isInteger() const { + return isSimple() ? + ((V >= FIRST_INTEGER_VALUETYPE && V <= LAST_INTEGER_VALUETYPE) || + (V >= v2i8 && V <= v2i64)) : isExtendedInteger(); + } + + /// isVector - Return true if this is a vector value type. + bool isVector() const { + return isSimple() ? + (V >= FIRST_VECTOR_VALUETYPE && V <= LAST_VECTOR_VALUETYPE) : + isExtendedVector(); + } + + /// is64BitVector - Return true if this is a 64-bit vector type. + bool is64BitVector() const { + return isSimple() ? + (V==v8i8 || V==v4i16 || V==v2i32 || V==v1i64 || V==v2f32) : + isExtended64BitVector(); + } + + /// is128BitVector - Return true if this is a 128-bit vector type. + bool is128BitVector() const { + return isSimple() ? + (V==v16i8 || V==v8i16 || V==v4i32 || + V==v2i64 || V==v4f32 || V==v2f64) : + isExtended128BitVector(); + } + + /// isByteSized - Return true if the bit size is a multiple of 8. + bool isByteSized() const { + return (getSizeInBits() & 7) == 0; + } + + /// isRound - Return true if the size is a power-of-two number of bytes. + bool isRound() const { + unsigned BitSize = getSizeInBits(); + return BitSize >= 8 && !(BitSize & (BitSize - 1)); + } + + /// bitsEq - Return true if this has the same number of bits as VT. 
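+    ///
+    /// For example (illustrative only), only the bit width is compared, so a
+    /// 64-bit vector and a 64-bit scalar compare equal:
+    ///
+    ///   MVT(MVT::v2f32).bitsEq(MVT::i64)   // true: both are 64 bits wide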
+ bool bitsEq(MVT VT) const { + return getSizeInBits() == VT.getSizeInBits(); + } + + /// bitsGT - Return true if this has more bits than VT. + bool bitsGT(MVT VT) const { + return getSizeInBits() > VT.getSizeInBits(); + } + + /// bitsGE - Return true if this has no less bits than VT. + bool bitsGE(MVT VT) const { + return getSizeInBits() >= VT.getSizeInBits(); + } + + /// bitsLT - Return true if this has less bits than VT. + bool bitsLT(MVT VT) const { + return getSizeInBits() < VT.getSizeInBits(); + } + + /// bitsLE - Return true if this has no more bits than VT. + bool bitsLE(MVT VT) const { + return getSizeInBits() <= VT.getSizeInBits(); + } + + + /// getSimpleVT - Return the SimpleValueType held in the specified + /// simple MVT. + SimpleValueType getSimpleVT() const { + assert(isSimple() && "Expected a SimpleValueType!"); + return SimpleValueType(V); + } + + /// getVectorElementType - Given a vector type, return the type of + /// each element. + MVT getVectorElementType() const { + assert(isVector() && "Invalid vector type!"); + switch (V) { + default: + return getExtendedVectorElementType(); + case v2i8 : + case v4i8 : + case v8i8 : + case v16i8: return i8; + case v2i16: + case v4i16: + case v8i16: return i16; + case v2i32: + case v3i32: + case v4i32: return i32; + case v1i64: + case v2i64: return i64; + case v2f32: + case v3f32: + case v4f32: return f32; + case v2f64: return f64; + } + } + + /// getVectorNumElements - Given a vector type, return the number of + /// elements it contains. + unsigned getVectorNumElements() const { + assert(isVector() && "Invalid vector type!"); + switch (V) { + default: + return getExtendedVectorNumElements(); + case v16i8: return 16; + case v8i8 : + case v8i16: return 8; + case v4i8: + case v4i16: + case v4i32: + case v4f32: return 4; + case v3i32: + case v3f32: return 3; + case v2i8: + case v2i16: + case v2i32: + case v2i64: + case v2f32: + case v2f64: return 2; + case v1i64: return 1; + } + } + + /// getSizeInBits - Return the size of the specified value type in bits. + unsigned getSizeInBits() const { + switch (V) { + case iPTR: + assert(0 && "Value type size is target-dependent. Ask TLI."); + case iPTRAny: + case iAny: + case fAny: + assert(0 && "Value type is overloaded."); + default: + return getExtendedSizeInBits(); + case i1 : return 1; + case i8 : return 8; + case i16 : + case v2i8: return 16; + case f32 : + case i32 : + case v4i8: + case v2i16: return 32; + case f64 : + case i64 : + case v8i8: + case v4i16: + case v2i32: + case v1i64: + case v2f32: return 64; + case f80 : return 80; + case v3i32: + case v3f32: return 96; + case f128: + case ppcf128: + case i128: + case v16i8: + case v8i16: + case v4i32: + case v2i64: + case v4f32: + case v2f64: return 128; + } + } + + /// getStoreSizeInBits - Return the number of bits overwritten by a store + /// of the specified value type. + unsigned getStoreSizeInBits() const { + return (getSizeInBits() + 7)/8*8; + } + + /// getRoundIntegerType - Rounds the bit-width of the given integer MVT up + /// to the nearest power of two (and at least to eight), and returns the + /// integer MVT with that number of bits. + MVT getRoundIntegerType() const { + assert(isInteger() && !isVector() && "Invalid integer type!"); + unsigned BitWidth = getSizeInBits(); + if (BitWidth <= 8) + return i8; + else + return getIntegerVT(1 << Log2_32_Ceil(BitWidth)); + } + + /// isPow2VectorType - Retuns true if the given vector is a power of 2. 
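+    ///
+    /// For example (illustrative only), v3f32 has three elements, so it is
+    /// not a power-of-2 vector; getPow2VectorType() below widens it to v4f32:
+    ///
+    ///   MVT(MVT::v3f32).isPow2VectorType()    // false
+    ///   MVT(MVT::v3f32).getPow2VectorType()   // MVT::v4f32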
+ bool isPow2VectorType() const { + unsigned NElts = getVectorNumElements(); + return !(NElts & (NElts - 1)); + } + + /// getPow2VectorType - Widens the length of the given vector MVT up to + /// the nearest power of 2 and returns that type. + MVT getPow2VectorType() const { + if (!isPow2VectorType()) { + unsigned NElts = getVectorNumElements(); + unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts); + return MVT::getVectorVT(getVectorElementType(), Pow2NElts); + } + else { + return *this; + } + } + + /// getMVTString - This function returns value type as a string, + /// e.g. "i32". + std::string getMVTString() const; + + /// getTypeForMVT - This method returns an LLVM type corresponding to the + /// specified MVT. For integer types, this returns an unsigned type. Note + /// that this will abort for types that cannot be represented. + const Type *getTypeForMVT() const; + + /// getMVT - Return the value type corresponding to the specified type. + /// This returns all pointers as iPTR. If HandleUnknown is true, unknown + /// types are returned as Other, otherwise they are invalid. + static MVT getMVT(const Type *Ty, bool HandleUnknown = false); + + /// getRawBits - Represent the type as a bunch of bits. + uintptr_t getRawBits() const { return V; } + + /// compareRawBits - A meaningless but well-behaved order, useful for + /// constructing containers. + struct compareRawBits { + bool operator()(MVT L, MVT R) const { + return L.getRawBits() < R.getRawBits(); + } + }; + + private: + // Methods for handling the Extended-type case in functions above. + // These are all out-of-line to prevent users of this header file + // from having a dependency on Type.h. + static MVT getExtendedIntegerVT(unsigned BitWidth); + static MVT getExtendedVectorVT(MVT VT, unsigned NumElements); + bool isExtendedFloatingPoint() const; + bool isExtendedInteger() const; + bool isExtendedVector() const; + bool isExtended64BitVector() const; + bool isExtended128BitVector() const; + MVT getExtendedVectorElementType() const; + unsigned getExtendedVectorNumElements() const; + unsigned getExtendedSizeInBits() const; + }; + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td new file mode 100644 index 0000000..53ed0be --- /dev/null +++ b/include/llvm/CodeGen/ValueTypes.td @@ -0,0 +1,66 @@ +//===- ValueTypes.td - ValueType definitions ---------------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Value types - These values correspond to the register types defined in the +// ValueTypes.h file. If you update anything here, you must update it there as +// well! 
+// +//===----------------------------------------------------------------------===// + +class ValueType<int size, int value> { + string Namespace = "MVT"; + int Size = size; + int Value = value; +} + +def OtherVT: ValueType<0 , 0>; // "Other" value +def i1 : ValueType<1 , 1>; // One bit boolean value +def i8 : ValueType<8 , 2>; // 8-bit integer value +def i16 : ValueType<16 , 3>; // 16-bit integer value +def i32 : ValueType<32 , 4>; // 32-bit integer value +def i64 : ValueType<64 , 5>; // 64-bit integer value +def i128 : ValueType<128, 6>; // 128-bit integer value +def f32 : ValueType<32 , 7>; // 32-bit floating point value +def f64 : ValueType<64 , 8>; // 64-bit floating point value +def f80 : ValueType<80 , 9>; // 80-bit floating point value +def f128 : ValueType<128, 10>; // 128-bit floating point value +def ppcf128: ValueType<128, 11>; // PPC 128-bit floating point value +def FlagVT : ValueType<0 , 12>; // Condition code or machine flag +def isVoid : ValueType<0 , 13>; // Produces no value +def v2i8 : ValueType<16 , 14>; // 2 x i8 vector value +def v4i8 : ValueType<32 , 15>; // 4 x i8 vector value +def v2i16 : ValueType<32 , 16>; // 2 x i16 vector value +def v8i8 : ValueType<64 , 17>; // 8 x i8 vector value +def v4i16 : ValueType<64 , 18>; // 4 x i16 vector value +def v2i32 : ValueType<64 , 19>; // 2 x i32 vector value +def v1i64 : ValueType<64 , 20>; // 1 x i64 vector value + +def v16i8 : ValueType<128, 21>; // 16 x i8 vector value +def v8i16 : ValueType<128, 22>; // 8 x i16 vector value +def v3i32 : ValueType<96 , 23>; // 3 x i32 vector value +def v4i32 : ValueType<128, 24>; // 4 x i32 vector value +def v2i64 : ValueType<128, 25>; // 2 x i64 vector value + +def v2f32 : ValueType<64, 26>; // 2 x f32 vector value +def v3f32 : ValueType<96 , 27>; // 3 x f32 vector value +def v4f32 : ValueType<128, 28>; // 4 x f32 vector value +def v2f64 : ValueType<128, 29>; // 2 x f64 vector value + +// Pseudo valuetype mapped to the current pointer size to any address space. +// Should only be used in TableGen. +def iPTRAny : ValueType<0, 252>; + +// Pseudo valuetype to represent "float of any format" +def fAny : ValueType<0 , 253>; + +// Pseudo valuetype to represent "integer of any bit width" +def iAny : ValueType<0 , 254>; + +// Pseudo valuetype mapped to the current pointer size. +def iPTR : ValueType<0 , 255>; diff --git a/include/llvm/CompilerDriver/Action.h b/include/llvm/CompilerDriver/Action.h new file mode 100644 index 0000000..7014139 --- /dev/null +++ b/include/llvm/CompilerDriver/Action.h @@ -0,0 +1,50 @@ +//===--- Action.h - The LLVM Compiler Driver --------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open +// Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Action - encapsulates a single shell command. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INCLUDE_COMPILER_DRIVER_ACTION_H +#define LLVM_INCLUDE_COMPILER_DRIVER_ACTION_H + +#include <string> +#include <vector> + +namespace llvmc { + + typedef std::vector<std::string> StrVector; + + /// Action - A class that encapsulates a single shell command. + class Action { + /// Command_ - The actual command (for example, 'ls'). + std::string Command_; + /// Args_ - Command arguments. Stdout redirection ("> file") is allowed. 
+ std::vector<std::string> Args_; + /// StopCompilation_ - Should we stop compilation after executing + /// this action? + bool StopCompilation_; + /// OutFile_ - The output file name. + std::string OutFile_; + + public: + Action (const std::string& C, const StrVector& A, + bool S, const std::string& O) + : Command_(C), Args_(A), StopCompilation_(S), OutFile_(O) + {} + + /// Execute - Executes the represented action. + int Execute () const; + bool StopCompilation () const { return StopCompilation_; } + const std::string& OutFile() { return OutFile_; } + }; + +} + +#endif // LLVM_INCLUDE_COMPILER_DRIVER_ACTION_H diff --git a/include/llvm/CompilerDriver/Common.td b/include/llvm/CompilerDriver/Common.td new file mode 100644 index 0000000..1f6bacc --- /dev/null +++ b/include/llvm/CompilerDriver/Common.td @@ -0,0 +1,118 @@ +//===- Common.td - Common definitions for LLVMC2 ----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains common definitions used in llvmc2 tool description files. +// +//===----------------------------------------------------------------------===// + +class Tool<list<dag> l> { + list<dag> properties = l; +} + +// Possible Tool properties. + +def in_language; +def out_language; +def output_suffix; +def cmd_line; +def join; +def sink; +def actions; + +// Possible option types. + +def alias_option; +def switch_option; +def parameter_option; +def parameter_list_option; +def prefix_option; +def prefix_list_option; + +// Possible option properties. + +def extern; +def help; +def hidden; +def multi_val; +def one_or_more; +def really_hidden; +def required; +def zero_or_one; + +// Empty DAG marker. +def empty; + +// The 'case' construct. +def case; + +// Boolean operators. +def and; +def or; + +// Primitive tests. +def switch_on; +def parameter_equals; +def element_in_list; +def input_languages_contain; +def not_empty; +def default; + +// Possible actions. + +def append_cmd; +def forward; +def forward_as; +def stop_compilation; +def unpack_values; +def error; + +// Increase/decrease the edge weight. +def inc_weight; +def dec_weight; + +// Used to specify plugin priority. +class PluginPriority<int p> { + int priority = p; +} + +// Option list - used to specify aliases and sometimes help strings. +class OptionList<list<dag> l> { + list<dag> options = l; +} + +// Map from suffixes to language names + +class LangToSuffixes<string str, list<string> lst> { + string lang = str; + list<string> suffixes = lst; +} + +class LanguageMap<list<LangToSuffixes> lst> { + list<LangToSuffixes> map = lst; +} + +// Compilation graph + +class EdgeBase<string t1, string t2, dag d> { + string a = t1; + string b = t2; + dag weight = d; +} + +class Edge<string t1, string t2> : EdgeBase<t1, t2, (empty)>; + +// Edge and SimpleEdge are synonyms. +class SimpleEdge<string t1, string t2> : EdgeBase<t1, t2, (empty)>; + +// Optionally enabled edge. 
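+// For illustration (a sketch in the style of typical llvmc2 graph
+// descriptions; the tool names are hypothetical): a plain Edge always has
+// weight 1, while an OptionalEdge is weighted by its 'case' expression, e.g.
+//
+//   Edge<"root", "my_cc">,
+//   OptionalEdge<"my_cc", "llc", (case (switch_on "S"), (inc_weight))>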
+class OptionalEdge<string t1, string t2, dag props> : EdgeBase<t1, t2, props>; + +class CompilationGraph<list<EdgeBase> lst> { + list<EdgeBase> edges = lst; +} diff --git a/include/llvm/CompilerDriver/CompilationGraph.h b/include/llvm/CompilerDriver/CompilationGraph.h new file mode 100644 index 0000000..825d4c4 --- /dev/null +++ b/include/llvm/CompilerDriver/CompilationGraph.h @@ -0,0 +1,325 @@ +//===--- CompilationGraph.h - The LLVM Compiler Driver ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open +// Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Compilation graph - definition. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INCLUDE_COMPILER_DRIVER_COMPILATION_GRAPH_H +#define LLVM_INCLUDE_COMPILER_DRIVER_COMPILATION_GRAPH_H + +#include "llvm/CompilerDriver/Tool.h" + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/IntrusiveRefCntPtr.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/System/Path.h" + +#include <cassert> +#include <string> + +namespace llvmc { + + class CompilationGraph; + typedef llvm::StringSet<> InputLanguagesSet; + + /// LanguageMap - Maps from extensions to language names. + class LanguageMap : public llvm::StringMap<std::string> { + public: + + /// GetLanguage - Find the language name corresponding to a given file. + const std::string& GetLanguage(const llvm::sys::Path&) const; + }; + + /// Edge - Represents an edge of the compilation graph. + class Edge : public llvm::RefCountedBaseVPTR<Edge> { + public: + Edge(const std::string& T) : ToolName_(T) {} + virtual ~Edge() {}; + + const std::string& ToolName() const { return ToolName_; } + virtual unsigned Weight(const InputLanguagesSet& InLangs) const = 0; + private: + std::string ToolName_; + }; + + /// SimpleEdge - An edge that has no properties. + class SimpleEdge : public Edge { + public: + SimpleEdge(const std::string& T) : Edge(T) {} + unsigned Weight(const InputLanguagesSet&) const { return 1; } + }; + + /// Node - A node (vertex) of the compilation graph. + struct Node { + // A Node holds a list of the outward edges. + typedef llvm::SmallVector<llvm::IntrusiveRefCntPtr<Edge>, 3> container_type; + typedef container_type::iterator iterator; + typedef container_type::const_iterator const_iterator; + + Node() : OwningGraph(0), InEdges(0) {} + Node(CompilationGraph* G) : OwningGraph(G), InEdges(0) {} + Node(CompilationGraph* G, Tool* T) : + OwningGraph(G), ToolPtr(T), InEdges(0) {} + + bool HasChildren() const { return !OutEdges.empty(); } + const std::string Name() const + { return ToolPtr ? ToolPtr->Name() : "root"; } + + // Iteration. + iterator EdgesBegin() { return OutEdges.begin(); } + const_iterator EdgesBegin() const { return OutEdges.begin(); } + iterator EdgesEnd() { return OutEdges.end(); } + const_iterator EdgesEnd() const { return OutEdges.end(); } + + /// AddEdge - Add an outward edge. Takes ownership of the provided + /// Edge object. + void AddEdge(Edge* E); + + // Inward edge counter. Used to implement topological sort. + void IncrInEdges() { ++InEdges; } + void DecrInEdges() { --InEdges; } + bool HasNoInEdges() const { return InEdges == 0; } + + // Needed to implement NodeChildIterator/GraphTraits + CompilationGraph* OwningGraph; + // The corresponding Tool. 
+    // WARNING: ToolPtr can be NULL (for the root node).
+    llvm::IntrusiveRefCntPtr<Tool> ToolPtr;
+    // Links to children.
+    container_type OutEdges;
+    // Inward edge counter. Updated in
+    // CompilationGraph::insertEdge(). Used for topological sorting.
+    unsigned InEdges;
+  };
+
+  class NodesIterator;
+
+  /// CompilationGraph - The compilation graph itself.
+  class CompilationGraph {
+    /// nodes_map_type - The main data structure.
+    typedef llvm::StringMap<Node> nodes_map_type;
+    /// tools_vector_type, tools_map_type - Data structures used to
+    /// map from language names to tools. (We can have several tools
+    /// associated with each language name, hence the need for a
+    /// vector.)
+    typedef
+    llvm::SmallVector<llvm::IntrusiveRefCntPtr<Edge>, 3> tools_vector_type;
+    typedef llvm::StringMap<tools_vector_type> tools_map_type;
+
+    /// ToolsMap - Map from language names to lists of tool names.
+    tools_map_type ToolsMap;
+    /// NodesMap - Map from tool names to Tool objects.
+    nodes_map_type NodesMap;
+
+  public:
+
+    typedef nodes_map_type::iterator nodes_iterator;
+    typedef nodes_map_type::const_iterator const_nodes_iterator;
+
+    CompilationGraph();
+
+    /// insertNode - Insert a new node into the graph. Takes
+    /// ownership of the object.
+    void insertNode(Tool* T);
+
+    /// insertEdge - Insert a new edge into the graph. Takes ownership
+    /// of the Edge object.
+    void insertEdge(const std::string& A, Edge* E);
+
+    /// Build - Build target(s) from the input file set. Command-line
+    /// options are passed implicitly as global variables.
+    int Build(llvm::sys::Path const& TempDir, const LanguageMap& LangMap);
+
+    /// Check - Check the compilation graph for common errors like
+    /// cycles, input/output language mismatch and multiple default
+    /// edges. Prints error messages and returns a non-zero value if it
+    /// finds any errors.
+    int Check();
+
+    /// getNode - Return a reference to the node corresponding to the
+    /// given tool name. Throws std::runtime_error.
+    Node& getNode(const std::string& ToolName);
+    const Node& getNode(const std::string& ToolName) const;
+
+    /// viewGraph - This function is meant for use from the debugger.
+    /// You can just say 'call G->viewGraph()' and a ghostview window
+    /// should pop up from the program, displaying the compilation
+    /// graph. This depends on there being a 'dot' and 'gv' program
+    /// in your path.
+    void viewGraph();
+
+    /// writeGraph - Write a Graphviz .dot source file to the current directory.
+    void writeGraph(const std::string& OutputFilename);
+
+    // GraphTraits support.
+    friend NodesIterator GraphBegin(CompilationGraph*);
+    friend NodesIterator GraphEnd(CompilationGraph*);
+
+  private:
+    // Helper functions.
+
+    /// getToolsVector - Return a reference to the list of tool names
+    /// corresponding to the given language name. Throws
+    /// std::runtime_error.
+    const tools_vector_type& getToolsVector(const std::string& LangName) const;
+
+    /// PassThroughGraph - Pass the input file through the toolchain
+    /// starting at StartNode.
+    void PassThroughGraph (const llvm::sys::Path& In, const Node* StartNode,
+                           const InputLanguagesSet& InLangs,
+                           const llvm::sys::Path& TempDir,
+                           const LanguageMap& LangMap) const;
+
+    /// FindToolChain - Find the head of the toolchain corresponding to
+    /// the given file.
+    const Node* FindToolChain(const llvm::sys::Path& In,
+                              const std::string* ForceLanguage,
+                              InputLanguagesSet& InLangs,
+                              const LanguageMap& LangMap) const;
+
+    /// BuildInitial - Traverse the initial parts of the toolchains.
+ void BuildInitial(InputLanguagesSet& InLangs, + const llvm::sys::Path& TempDir, + const LanguageMap& LangMap); + + /// TopologicalSort - Sort the nodes in topological order. + void TopologicalSort(std::vector<const Node*>& Out); + /// TopologicalSortFilterJoinNodes - Call TopologicalSort and + /// filter the resulting list to include only Join nodes. + void TopologicalSortFilterJoinNodes(std::vector<const Node*>& Out); + + // Functions used to implement Check(). + + /// CheckLanguageNames - Check that output/input language names + /// match for all nodes. + int CheckLanguageNames() const; + /// CheckMultipleDefaultEdges - check that there are no multiple + /// default default edges. + int CheckMultipleDefaultEdges() const; + /// CheckCycles - Check that there are no cycles in the graph. + int CheckCycles(); + + }; + + // GraphTraits support code. + + /// NodesIterator - Auxiliary class needed to implement GraphTraits + /// support. Can be generalised to something like value_iterator + /// for map-like containers. + class NodesIterator : public CompilationGraph::nodes_iterator { + typedef CompilationGraph::nodes_iterator super; + typedef NodesIterator ThisType; + typedef Node* pointer; + typedef Node& reference; + + public: + NodesIterator(super I) : super(I) {} + + inline reference operator*() const { + return super::operator->()->second; + } + inline pointer operator->() const { + return &super::operator->()->second; + } + }; + + inline NodesIterator GraphBegin(CompilationGraph* G) { + return NodesIterator(G->NodesMap.begin()); + } + + inline NodesIterator GraphEnd(CompilationGraph* G) { + return NodesIterator(G->NodesMap.end()); + } + + + /// NodeChildIterator - Another auxiliary class needed by GraphTraits. + class NodeChildIterator : public bidirectional_iterator<Node, ptrdiff_t> { + typedef NodeChildIterator ThisType; + typedef Node::container_type::iterator iterator; + + CompilationGraph* OwningGraph; + iterator EdgeIter; + public: + typedef Node* pointer; + typedef Node& reference; + + NodeChildIterator(Node* N, iterator I) : + OwningGraph(N->OwningGraph), EdgeIter(I) {} + + const ThisType& operator=(const ThisType& I) { + assert(OwningGraph == I.OwningGraph); + EdgeIter = I.EdgeIter; + return *this; + } + + inline bool operator==(const ThisType& I) const { + assert(OwningGraph == I.OwningGraph); + return EdgeIter == I.EdgeIter; + } + inline bool operator!=(const ThisType& I) const { + return !this->operator==(I); + } + + inline pointer operator*() const { + return &OwningGraph->getNode((*EdgeIter)->ToolName()); + } + inline pointer operator->() const { + return this->operator*(); + } + + ThisType& operator++() { ++EdgeIter; return *this; } // Preincrement + ThisType operator++(int) { // Postincrement + ThisType tmp = *this; + ++*this; + return tmp; + } + + inline ThisType& operator--() { --EdgeIter; return *this; } // Predecrement + inline ThisType operator--(int) { // Postdecrement + ThisType tmp = *this; + --*this; + return tmp; + } + + }; +} + +namespace llvm { + template <> + struct GraphTraits<llvmc::CompilationGraph*> { + typedef llvmc::CompilationGraph GraphType; + typedef llvmc::Node NodeType; + typedef llvmc::NodeChildIterator ChildIteratorType; + + static NodeType* getEntryNode(GraphType* G) { + return &G->getNode("root"); + } + + static ChildIteratorType child_begin(NodeType* N) { + return ChildIteratorType(N, N->OutEdges.begin()); + } + static ChildIteratorType child_end(NodeType* N) { + return ChildIteratorType(N, N->OutEdges.end()); + } + + typedef 
llvmc::NodesIterator nodes_iterator;
+    static nodes_iterator nodes_begin(GraphType *G) {
+      return GraphBegin(G);
+    }
+    static nodes_iterator nodes_end(GraphType *G) {
+      return GraphEnd(G);
+    }
+  };
+
+}
+
+#endif // LLVM_INCLUDE_COMPILER_DRIVER_COMPILATION_GRAPH_H
diff --git a/include/llvm/CompilerDriver/Error.h b/include/llvm/CompilerDriver/Error.h
new file mode 100644
index 0000000..fa678cf
--- /dev/null
+++ b/include/llvm/CompilerDriver/Error.h
@@ -0,0 +1,35 @@
+//===--- Error.h - The LLVM Compiler Driver ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Exception classes for llvmc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_COMPILER_DRIVER_ERROR_H
+#define LLVM_INCLUDE_COMPILER_DRIVER_ERROR_H
+
+#include <stdexcept>
+
+namespace llvmc {
+
+  /// error_code - This gets thrown during the compilation process if a tool
+  /// invocation returns a non-zero exit code.
+  class error_code: public std::runtime_error {
+    int Code_;
+  public:
+    error_code (int c)
+      : std::runtime_error("Tool returned error code"), Code_(c)
+    {}
+
+    int code() const { return Code_; }
+  };
+
+}
+
+#endif // LLVM_INCLUDE_COMPILER_DRIVER_ERROR_H
diff --git a/include/llvm/CompilerDriver/Main.inc b/include/llvm/CompilerDriver/Main.inc
new file mode 100644
index 0000000..2d50c95
--- /dev/null
+++ b/include/llvm/CompilerDriver/Main.inc
@@ -0,0 +1,139 @@
+//===--- Main.inc - The LLVM Compiler Driver --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool provides a single point of access to the LLVM
+// compilation tools. It has many options. To discover the options
+// supported, please refer to the tool's manual page or run the tool
+// with the --help option.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_COMPILER_DRIVER_MAIN_INC
+#define LLVM_INCLUDE_COMPILER_DRIVER_MAIN_INC
+
+#include "llvm/CompilerDriver/CompilationGraph.h"
+#include "llvm/CompilerDriver/Error.h"
+#include "llvm/CompilerDriver/Plugin.h"
+
+#include "llvm/System/Path.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/PluginLoader.h"
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+namespace cl = llvm::cl;
+namespace sys = llvm::sys;
+using namespace llvmc;
+
+// Built-in command-line options.
+// External linkage here is intentional.
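
Before the built-in option declarations, a brief editorial aside on the compilation-graph interface that closed out above. The following is a minimal, hand-written sketch, not part of this patch; in practice nodes and edges are emitted from the TableGen descriptions. PreferCxxEdge, addCxxPath and the CxxCompiler parameter are illustrative names, and the membership test assumes the StringSet::count() overload used elsewhere in the driver.

  #include "llvm/CompilerDriver/CompilationGraph.h"

  // Hypothetical edge: preferred (weight 2) only when "c++" is among the
  // detected input languages, otherwise it yields to the default edge.
  class PreferCxxEdge : public llvmc::Edge {
  public:
    PreferCxxEdge(const std::string& ToolName) : llvmc::Edge(ToolName) {}
    unsigned Weight(const llvmc::InputLanguagesSet& InLangs) const {
      return InLangs.count("c++") ? 2 : 0;
    }
  };

  // Insert a tool node and connect it to the root node. The graph takes
  // ownership of both the Tool and the Edge objects, as documented above.
  int addCxxPath(llvmc::CompilationGraph& G, llvmc::Tool* CxxCompiler) {
    G.insertNode(CxxCompiler);
    G.insertEdge("root", new PreferCxxEdge(CxxCompiler->Name()));
    // Check() reports cycles, language mismatches and duplicate default
    // edges, returning a non-zero value on error.
    return G.Check();
  }

With edges weighted this way, FindToolChain can pick a different entry tool depending on which languages appear in the input file set, which is what the OptionalEdge construct in Common.td ultimately compiles down to.
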
+ +cl::list<std::string> InputFilenames(cl::Positional, cl::desc("<input file>"), + cl::ZeroOrMore); +cl::opt<std::string> OutputFilename("o", cl::desc("Output file name"), + cl::value_desc("file"), cl::Prefix); +cl::list<std::string> Languages("x", + cl::desc("Specify the language of the following input files"), + cl::ZeroOrMore); +cl::opt<bool> DryRun("dry-run", + cl::desc("Only pretend to run commands")); +cl::opt<bool> VerboseMode("v", + cl::desc("Enable verbose mode")); + +cl::opt<bool> CheckGraph("check-graph", + cl::desc("Check the compilation graph for errors"), + cl::Hidden); +cl::opt<bool> WriteGraph("write-graph", + cl::desc("Write compilation-graph.dot file"), + cl::Hidden); +cl::opt<bool> ViewGraph("view-graph", + cl::desc("Show compilation graph in GhostView"), + cl::Hidden); +cl::opt<bool> SaveTemps("save-temps", + cl::desc("Keep temporary files"), + cl::Hidden); + +namespace { + /// BuildTargets - A small wrapper for CompilationGraph::Build. + int BuildTargets(CompilationGraph& graph, const LanguageMap& langMap) { + int ret; + const sys::Path& tempDir = SaveTemps + ? sys::Path("") + : sys::Path(sys::Path::GetTemporaryDirectory()); + + try { + ret = graph.Build(tempDir, langMap); + } + catch(...) { + tempDir.eraseFromDisk(true); + throw; + } + + if (!SaveTemps) + tempDir.eraseFromDisk(true); + return ret; + } +} + +int main(int argc, char** argv) { + try { + LanguageMap langMap; + CompilationGraph graph; + + cl::ParseCommandLineOptions + (argc, argv, "LLVM Compiler Driver (Work In Progress)", true); + + PluginLoader Plugins; + Plugins.PopulateLanguageMap(langMap); + Plugins.PopulateCompilationGraph(graph); + + if (CheckGraph) { + int ret = graph.Check(); + if (!ret) + std::cerr << "check-graph: no errors found.\n"; + + return ret; + } + + if (ViewGraph) { + graph.viewGraph(); + if (!WriteGraph) + return 0; + } + + if (WriteGraph) { + graph.writeGraph(OutputFilename.empty() + ? std::string("compilation-graph.dot") + : OutputFilename); + return 0; + } + + if (InputFilenames.empty()) { + throw std::runtime_error("no input files"); + } + + return BuildTargets(graph, langMap); + } + catch(llvmc::error_code& ec) { + return ec.code(); + } + catch(const std::exception& ex) { + std::cerr << argv[0] << ": " << ex.what() << '\n'; + } + catch(...) { + std::cerr << argv[0] << ": unknown error!\n"; + } + return 1; +} + +#endif // LLVM_INCLUDE_COMPILER_DRIVER_MAIN_INC diff --git a/include/llvm/CompilerDriver/Plugin.h b/include/llvm/CompilerDriver/Plugin.h new file mode 100644 index 0000000..9f9eee3 --- /dev/null +++ b/include/llvm/CompilerDriver/Plugin.h @@ -0,0 +1,79 @@ +//===--- Plugin.h - The LLVM Compiler Driver --------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open +// Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Plugin support for llvmc. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INCLUDE_COMPILER_DRIVER_PLUGIN_H +#define LLVM_INCLUDE_COMPILER_DRIVER_PLUGIN_H + +#include "llvm/Support/Registry.h" + +namespace llvmc { + + class LanguageMap; + class CompilationGraph; + + /// BasePlugin - An abstract base class for all LLVMC plugins. + struct BasePlugin { + + /// Priority - Plugin priority, useful for handling dependencies + /// between plugins. Plugins with lower priorities are loaded + /// first. 
+ virtual int Priority() const { return 0; } + + /// PopulateLanguageMap - The auto-generated function that fills in + /// the language map (map from file extensions to language names). + virtual void PopulateLanguageMap(LanguageMap&) const = 0; + + /// PopulateCompilationGraph - The auto-generated function that + /// populates the compilation graph with nodes and edges. + virtual void PopulateCompilationGraph(CompilationGraph&) const = 0; + + /// Needed to avoid a compiler warning. + virtual ~BasePlugin() {} + }; + + typedef llvm::Registry<BasePlugin> PluginRegistry; + + template <class P> + struct RegisterPlugin + : public PluginRegistry::Add<P> { + typedef PluginRegistry::Add<P> Base; + + RegisterPlugin(const char* Name = "Nameless", + const char* Desc = "Auto-generated plugin") + : Base(Name, Desc) {} + }; + + + /// PluginLoader - Helper class used by the main program for + /// lifetime management. + struct PluginLoader { + PluginLoader(); + ~PluginLoader(); + + /// PopulateLanguageMap - Fills in the language map by calling + /// PopulateLanguageMap methods of all plugins. + void PopulateLanguageMap(LanguageMap& langMap); + + /// PopulateCompilationGraph - Populates the compilation graph by + /// calling PopulateCompilationGraph methods of all plugins. + void PopulateCompilationGraph(CompilationGraph& tools); + + private: + // noncopyable + PluginLoader(const PluginLoader& other); + const PluginLoader& operator=(const PluginLoader& other); + }; + +} + +#endif // LLVM_INCLUDE_COMPILER_DRIVER_PLUGIN_H diff --git a/include/llvm/CompilerDriver/Tool.h b/include/llvm/CompilerDriver/Tool.h new file mode 100644 index 0000000..a982e2d --- /dev/null +++ b/include/llvm/CompilerDriver/Tool.h @@ -0,0 +1,85 @@ +//===--- Tool.h - The LLVM Compiler Driver ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open +// Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Tool abstract base class - an interface to tool descriptions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INCLUDE_COMPILER_DRIVER_TOOL_H +#define LLVM_INCLUDE_COMPILER_DRIVER_TOOL_H + +#include "llvm/CompilerDriver/Action.h" + +#include "llvm/ADT/IntrusiveRefCntPtr.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/System/Path.h" + +#include <vector> + +namespace llvmc { + + class LanguageMap; + typedef std::vector<llvm::sys::Path> PathVector; + typedef llvm::StringSet<> InputLanguagesSet; + + /// Tool - A class + class Tool : public llvm::RefCountedBaseVPTR<Tool> { + public: + + virtual ~Tool() {} + + virtual Action GenerateAction (const PathVector& inFiles, + bool HasChildren, + const llvm::sys::Path& TempDir, + const InputLanguagesSet& InLangs, + const LanguageMap& LangMap) const = 0; + + virtual Action GenerateAction (const llvm::sys::Path& inFile, + bool HasChildren, + const llvm::sys::Path& TempDir, + const InputLanguagesSet& InLangs, + const LanguageMap& LangMap) const = 0; + + virtual const char* Name() const = 0; + virtual const char** InputLanguages() const = 0; + virtual const char* OutputLanguage() const = 0; + + virtual bool IsJoin() const = 0; + + protected: + /// OutFileName - Generate the output file name. 
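
Before Tool.h continues below, here is a rough, hand-written sketch of the plugin mechanism declared in Plugin.h; it is not part of the patch. Real llvmc plugins are generated from the TableGen descriptions in Common.td, but the registration path is the same. ToyPlugin and its extension mappings are made-up names, and the LanguageMap indexing assumes the StringMap operator[] used by the generated driver code.

  #include "llvm/CompilerDriver/CompilationGraph.h"
  #include "llvm/CompilerDriver/Plugin.h"

  namespace {
    struct ToyPlugin : public llvmc::BasePlugin {
      // Lower priorities are loaded first; 0 is the default.
      int Priority() const { return 10; }

      // Map file extensions to language names.
      void PopulateLanguageMap(llvmc::LanguageMap& LangMap) const {
        LangMap["c"]   = "c";
        LangMap["cpp"] = "c++";
      }

      // Nodes and edges would be inserted here via insertNode()/insertEdge().
      void PopulateCompilationGraph(llvmc::CompilationGraph&) const {}
    };

    // Static registration; the PluginLoader used by Main.inc walks this
    // registry and invokes the Populate* hooks of every registered plugin.
    llvmc::RegisterPlugin<ToyPlugin> X("ToyPlugin", "An example plugin");
  }
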
+ llvm::sys::Path OutFilename(const llvm::sys::Path& In, + const llvm::sys::Path& TempDir, + bool StopCompilation, + const char* OutputSuffix) const; + }; + + /// JoinTool - A Tool that has an associated input file list. + class JoinTool : public Tool { + public: + void AddToJoinList(const llvm::sys::Path& P) { JoinList_.push_back(P); } + void ClearJoinList() { JoinList_.clear(); } + bool JoinListEmpty() const { return JoinList_.empty(); } + + Action GenerateAction(bool HasChildren, + const llvm::sys::Path& TempDir, + const InputLanguagesSet& InLangs, + const LanguageMap& LangMap) const { + return GenerateAction(JoinList_, HasChildren, TempDir, InLangs, LangMap); + } + // We shouldn't shadow base class's version of GenerateAction. + using Tool::GenerateAction; + + private: + PathVector JoinList_; + }; + +} + +#endif // LLVM_INCLUDE_COMPILER_DRIVER_TOOL_H diff --git a/include/llvm/Config/alloca.h b/include/llvm/Config/alloca.h new file mode 100644 index 0000000..9990507 --- /dev/null +++ b/include/llvm/Config/alloca.h @@ -0,0 +1,49 @@ +/* + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + ****************************************************************************** + * + * Description: + * This header file includes the infamous alloc.h header file if the + * autoconf system has found it. It hides all of the autoconf details + * from the rest of the application source code. + */ + +#ifndef _CONFIG_ALLOC_H +#define _CONFIG_ALLOC_H + +#include "llvm/Config/config.h" + +/* + * This is a modified version of that suggested by the Autoconf manual. + * 1) The #pragma is indented so that pre-ANSI C compilers ignore it. + * 2) If alloca.h cannot be found, then try stdlib.h. Some platforms + * (notably FreeBSD) defined alloca() there. + */ +#ifdef _MSC_VER +#include <malloc.h> +#define alloca _alloca +#elif defined(HAVE_ALLOCA_H) +#include <alloca.h> +#elif defined(__MINGW32__) && defined(HAVE_MALLOC_H) +#include <malloc.h> +#elif !defined(__GNUC__) +# ifdef _AIX +# pragma alloca +# else +# ifndef alloca + char * alloca (); +# endif +# endif +#else +# ifdef HAVE_STDLIB_H +# include <stdlib.h> +# else +# error "The function alloca() is required but not found!" +# endif +#endif + +#endif diff --git a/include/llvm/Config/config.h.cmake b/include/llvm/Config/config.h.cmake new file mode 100644 index 0000000..caabf07 --- /dev/null +++ b/include/llvm/Config/config.h.cmake @@ -0,0 +1,582 @@ + +/************************************** +** Created by Kevin from config.h.in ** +***************************************/ + +/* Define if dlopen(0) will open the symbols of the program */ +#undef CAN_DLOPEN_SELF + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +#undef CRAY_STACKSEG_END + +/* Define to 1 if using `alloca.c'. */ +#undef C_ALLOCA + +/* Define if CBE is enabled for printf %a output */ +#undef ENABLE_CBE_PRINTF_A + +/* Define if position independent code is enabled */ +#undef ENABLE_PIC + +/* Define if threads enabled */ +#cmakedefine ENABLE_THREADS ${ENABLE_THREADS} + +/* Define to 1 if you have `alloca', as a function or macro. */ +#undef HAVE_ALLOCA + +/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix). + */ +#undef HAVE_ALLOCA_H + +/* Define to 1 if you have the `argz_append' function. 
*/ +#undef HAVE_ARGZ_APPEND + +/* Define to 1 if you have the `argz_create_sep' function. */ +#undef HAVE_ARGZ_CREATE_SEP + +/* Define to 1 if you have the <argz.h> header file. */ +#cmakedefine HAVE_ARGZ_H ${HAVE_ARGZ_H} + +/* Define to 1 if you have the `argz_insert' function. */ +#undef HAVE_ARGZ_INSERT + +/* Define to 1 if you have the `argz_next' function. */ +#undef HAVE_ARGZ_NEXT + +/* Define to 1 if you have the `argz_stringify' function. */ +#undef HAVE_ARGZ_STRINGIFY + +/* Define to 1 if you have the <assert.h> header file. */ +#cmakedefine HAVE_ASSERT_H ${HAVE_ASSERT_H} + +/* Define to 1 if you have the `backtrace' function. */ +#undef HAVE_BACKTRACE + +/* Define to 1 if you have the `bcopy' function. */ +#undef HAVE_BCOPY + +/* Does not have bi-directional iterator */ +#undef HAVE_BI_ITERATOR + +/* Define to 1 if you have the `ceilf' function. */ +#cmakedefine HAVE_CEILF ${HAVE_CEILF} + +/* Define to 1 if you have the `closedir' function. */ +#undef HAVE_CLOSEDIR + +/* Define to 1 if you have the <ctype.h> header file. */ +#undef HAVE_CTYPE_H + +/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'. + */ +#cmakedefine HAVE_DIRENT_H ${HAVE_DIRENT_H} + +/* Define if you have the GNU dld library. */ +#undef HAVE_DLD + +/* Define to 1 if you have the <dld.h> header file. */ +#cmakedefine HAVE_DLD_H ${HAVE_DLD_H} + +/* Define to 1 if you have the `dlerror' function. */ +#undef HAVE_DLERROR + +/* Define to 1 if you have the <dlfcn.h> header file. */ +#cmakedefine HAVE_DLFCN_H ${HAVE_DLFCN_H} + +/* Define if dlopen() is available on this platform. */ +#undef HAVE_DLOPEN + +/* Define to 1 if you have the <dl.h> header file. */ +#cmakedefine HAVE_DL_H ${HAVE_DL_H} + +/* Define if the dot program is available */ +#undef HAVE_DOT + +/* Define if the dotty program is available */ +#undef HAVE_DOTTY + +/* Define if you have the _dyld_func_lookup function. */ +#undef HAVE_DYLD + +/* Define to 1 if you have the <errno.h> header file. */ +#cmakedefine HAVE_ERRNO_H ${HAVE_ERRNO_H} + +/* Define to 1 if the system has the type `error_t'. */ +#undef HAVE_ERROR_T + +/* Define to 1 if you have the <execinfo.h> header file. */ +#cmakedefine HAVE_EXECINFO_H ${HAVE_EXECINFO_H} + +/* Define to 1 if you have the <fcntl.h> header file. */ +#cmakedefine HAVE_FCNTL_H ${HAVE_FCNTL_H} + +/* Set to 1 if the finite function is found in <ieeefp.h> */ +#undef HAVE_FINITE_IN_IEEEFP_H + +/* Define to 1 if you have the `floorf' function. */ +#cmakedefine HAVE_FLOORF ${HAVE_FLOORF} + +/* Does not have forward iterator */ +#undef HAVE_FWD_ITERATOR + +/* Define to 1 if you have the `getcwd' function. */ +#undef HAVE_GETCWD + +/* Define to 1 if you have the `getpagesize' function. */ +#cmakedefine HAVE_GETPAGESIZE ${HAVE_GETPAGESIZE} + +/* Define to 1 if you have the `getrlimit' function. */ +#undef HAVE_GETRLIMIT + +/* Define to 1 if you have the `getrusage' function. */ +#cmakedefine HAVE_GETRUSAGE ${HAVE_GETRUSAGE} + +/* Define to 1 if you have the `gettimeofday' function. */ +#undef HAVE_GETTIMEOFDAY + +/* Does not have <hash_map> */ +#undef HAVE_GLOBAL_HASH_MAP + +/* Does not have hash_set in global namespace */ +#undef HAVE_GLOBAL_HASH_SET + +/* Does not have ext/hash_map */ +#undef HAVE_GNU_EXT_HASH_MAP + +/* Does not have hash_set in gnu namespace */ +#undef HAVE_GNU_EXT_HASH_SET + +/* Define if the Graphviz program is available */ +#undef HAVE_GRAPHVIZ + +/* Define if the gv program is available */ +#undef HAVE_GV + +/* Define to 1 if you have the `index' function. 
*/ +#undef HAVE_INDEX + +/* Define to 1 if the system has the type `int64_t'. */ +#undef HAVE_INT64_T + +/* Define to 1 if you have the <inttypes.h> header file. */ +#cmakedefine HAVE_INTTYPES_H ${HAVE_INTTYPES_H} + +/* Define to 1 if you have the `isatty' function. */ +#cmakedefine HAVE_ISATTY 1 + +/* Set to 1 if the isinf function is found in <cmath> */ +#cmakedefine HAVE_ISINF_IN_CMATH ${HAVE_ISINF_IN_CMATH} + +/* Set to 1 if the isinf function is found in <math.h> */ +#cmakedefine HAVE_ISINF_IN_MATH_H ${HAVE_ISINF_IN_MATH_H} + +/* Set to 1 if the isnan function is found in <cmath> */ +#cmakedefine HAVE_ISNAN_IN_CMATH ${HAVE_ISNAN_IN_CMATH} + +/* Set to 1 if the isnan function is found in <math.h> */ +#cmakedefine HAVE_ISNAN_IN_MATH_H ${HAVE_ISNAN_IN_MATH_H} + +/* Define if you have the libdl library or equivalent. */ +#undef HAVE_LIBDL + +/* Define to 1 if you have the `elf' library (-lelf). */ +#undef HAVE_LIBELF + +/* Define to 1 if you have the `imagehlp' library (-limagehlp). */ +#cmakedefine HAVE_LIBIMAGEHLP ${HAVE_LIBIMAGEHLP} + +/* Define to 1 if you have the `m' library (-lm). */ +#undef HAVE_LIBM + +/* Define to 1 if you have the `psapi' library (-lpsapi). */ +#cmakedefine HAVE_LIBPSAPI ${HAVE_LIBPSAPI} + +/* Define to 1 if you have the `pthread' library (-lpthread). */ +#cmakedefine HAVE_LIBPTHREAD ${HAVE_LIBPTHREAD} + +/* Define to 1 if you have the `udis86' library (-ludis86). */ +#undef HAVE_LIBUDIS86 + +/* Define to 1 if you have the <limits.h> header file. */ +#cmakedefine HAVE_LIMITS_H ${HAVE_LIMITS_H} + +/* Define to 1 if you have the <link.h> header file. */ +#cmakedefine HAVE_LINK_H ${HAVE_LINK_H} + +/* Define if you can use -Wl,-R. to pass -R. to the linker, in order to add + the current directory to the dynamic linker search path. */ +#undef HAVE_LINK_R + +/* Define to 1 if you have the `longjmp' function. */ +#undef HAVE_LONGJMP + +/* Define to 1 if you have the <mach/mach.h> header file. */ +#undef HAVE_MACH_MACH_H + +/* Define to 1 if you have the <mach-o/dyld.h> header file. */ +#undef HAVE_MACH_O_DYLD_H + +/* Define if mallinfo() is available on this platform. */ +#cmakedefine HAVE_MALLINFO ${HAVE_MALLINFO} + +/* Define to 1 if you have the <malloc.h> header file. */ +#cmakedefine HAVE_MALLOC_H ${HAVE_MALLOC_H} + +/* Define to 1 if you have the <malloc/malloc.h> header file. */ +#cmakedefine HAVE_MALLOC_MALLOC_H ${HAVE_MALLOC_MALLOC_H} + +/* Define to 1 if you have the `malloc_zone_statistics' function. */ +#undef HAVE_MALLOC_ZONE_STATISTICS + +/* Define to 1 if you have the `memcpy' function. */ +#undef HAVE_MEMCPY + +/* Define to 1 if you have the `memmove' function. */ +#undef HAVE_MEMMOVE + +/* Define to 1 if you have the <memory.h> header file. */ +#cmakedefine HAVE_MEMORY_H ${HAVE_MEMORY_H} + +/* Define to 1 if you have the `mkdtemp' function. */ +#undef HAVE_MKDTEMP + +/* Define to 1 if you have the `mkstemp' function. */ +#undef HAVE_MKSTEMP + +/* Define to 1 if you have the `mktemp' function. */ +#undef HAVE_MKTEMP + +/* Define to 1 if you have a working `mmap' system call. */ +#undef HAVE_MMAP + +/* Define if mmap() uses MAP_ANONYMOUS to map anonymous pages, or undefine if + it uses MAP_ANON */ +#undef HAVE_MMAP_ANONYMOUS + +/* Define if mmap() can map files into memory */ +#undef HAVE_MMAP_FILE + +/* define if the compiler implements namespaces */ +#undef HAVE_NAMESPACES + +/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */ +#cmakedefine HAVE_NDIR_H ${HAVE_NDIR_H} + +/* Define to 1 if you have the `nearbyintf' function. 
*/ +#undef HAVE_NEARBYINTF + +/* Define to 1 if you have the `opendir' function. */ +#undef HAVE_OPENDIR + +/* Define if libtool can extract symbol lists from object files. */ +#undef HAVE_PRELOADED_SYMBOLS + +/* Define to have the %a format string */ +#undef HAVE_PRINTF_A + +/* Have pthread.h */ +#cmakedefine HAVE_PTHREAD_H ${HAVE_PTHREAD_H} + +/* Have pthread_mutex_lock */ +#cmakedefine HAVE_PTHREAD_MUTEX_LOCK ${HAVE_PTHREAD_MUTEX_LOCK} + +/* Define to 1 if srand48/lrand48/drand48 exist in <stdlib.h> */ +#undef HAVE_RAND48 + +/* Define to 1 if you have the `readdir' function. */ +#undef HAVE_READDIR + +/* Define to 1 if you have the `realpath' function. */ +#undef HAVE_REALPATH + +/* Define to 1 if you have the `rindex' function. */ +#undef HAVE_RINDEX + +/* Define to 1 if you have the `rintf' function. */ +#undef HAVE_RINTF + +/* Define to 1 if you have the `roundf' function. */ +#undef HAVE_ROUNDF + +/* Define to 1 if you have the `sbrk' function. */ +#undef HAVE_SBRK + +/* Define to 1 if you have the `setjmp' function. */ +#undef HAVE_SETJMP + +/* Define to 1 if you have the <setjmp.h> header file. */ +#cmakedefine HAVE_SETJMP_H ${HAVE_SETJMP_H} + +/* Define to 1 if you have the `setrlimit' function. */ +#cmakedefine HAVE_SETRLIMIT ${HAVE_SETRLIMIT} + +/* Define if you have the shl_load function. */ +#undef HAVE_SHL_LOAD + +/* Define to 1 if you have the `siglongjmp' function. */ +#undef HAVE_SIGLONGJMP + +/* Define to 1 if you have the <signal.h> header file. */ +#cmakedefine HAVE_SIGNAL_H ${HAVE_SIGNAL_H} + +/* Define to 1 if you have the `sigsetjmp' function. */ +#undef HAVE_SIGSETJMP + +/* Define to 1 if you have the <stdint.h> header file. */ +#cmakedefine HAVE_STDINT_H ${HAVE_STDINT_H} + +/* Define to 1 if you have the <stdio.h> header file. */ +#cmakedefine HAVE_STDIO_H ${HAVE_STDIO_H} + +/* Define to 1 if you have the <stdlib.h> header file. */ +#cmakedefine HAVE_STDLIB_H ${HAVE_STDLIB_H} + +/* Does not have ext/hash_map> */ +#undef HAVE_STD_EXT_HASH_MAP + +/* Does not have hash_set in std namespace */ +#undef HAVE_STD_EXT_HASH_SET + +/* Set to 1 if the std::isinf function is found in <cmath> */ +#undef HAVE_STD_ISINF_IN_CMATH + +/* Set to 1 if the std::isnan function is found in <cmath> */ +#undef HAVE_STD_ISNAN_IN_CMATH + +/* Does not have std namespace iterator */ +#undef HAVE_STD_ITERATOR + +/* Define to 1 if you have the `strchr' function. */ +#undef HAVE_STRCHR + +/* Define to 1 if you have the `strcmp' function. */ +#undef HAVE_STRCMP + +/* Define to 1 if you have the `strdup' function. */ +#undef HAVE_STRDUP + +/* Define to 1 if you have the `strerror' function. */ +#undef HAVE_STRERROR + +/* Define to 1 if you have the `strerror_r' function. */ +#undef HAVE_STRERROR_R + +/* Define to 1 if you have the <strings.h> header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the <string.h> header file. */ +#cmakedefine HAVE_STRING_H ${HAVE_STRING_H} + +/* Define to 1 if you have the `strrchr' function. */ +#undef HAVE_STRRCHR + +/* Define to 1 if you have the `strtoll' function. */ +#cmakedefine HAVE_STRTOLL ${HAVE_STRTOLL} + +/* Define to 1 if you have the `strtoq' function. */ +#undef HAVE_STRTOQ + +/* Define to 1 if you have the `sysconf' function. */ +#undef HAVE_SYSCONF + +/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'. + */ +#cmakedefine HAVE_SYS_DIR_H ${HAVE_SYS_DIR_H} + +/* Define to 1 if you have the <sys/dl.h> header file. 
*/ +#cmakedefine HAVE_SYS_DL_H ${HAVE_SYS_DL_H} + +/* Define to 1 if you have the <sys/ioctl.h> header file. */ +#cmakedefine HAVE_SYS_IOCTL_H ${HAVE_SYS_IOCTL_H} + +/* Define to 1 if you have the <sys/mman.h> header file. */ +#cmakedefine HAVE_SYS_MMAN_H ${} + +/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'. + */ +#cmakedefine HAVE_SYS_NDIR_H ${HAVE_SYS_NDIR_H} + +/* Define to 1 if you have the <sys/param.h> header file. */ +#cmakedefine HAVE_SYS_PARAM_H ${HAVE_SYS_PARAM_H} + +/* Define to 1 if you have the <sys/resource.h> header file. */ +#cmakedefine HAVE_SYS_RESOURCE_H ${HAVE_SYS_RESOURCE_H} + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#cmakedefine HAVE_SYS_STAT_H ${HAVE_SYS_STAT_H} + +/* Define to 1 if you have the <sys/time.h> header file. */ +#cmakedefine HAVE_SYS_TIME_H ${HAVE_SYS_TIME_H} + +/* Define to 1 if you have the <sys/types.h> header file. */ +#cmakedefine HAVE_SYS_TYPES_H ${HAVE_SYS_TYPES_H} + +/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */ +#undef HAVE_SYS_WAIT_H + +/* Define to 1 if the system has the type `uint64_t'. */ +#undef HAVE_UINT64_T + +/* Define to 1 if you have the <termios.h> header file. */ +#cmakedefine HAVE_TERMIOS_H ${HAVE_TERMIOS_H} + +/* Define to 1 if you have the <unistd.h> header file. */ +#cmakedefine HAVE_UNISTD_H ${HAVE_UNISTD_H} + +/* Define to 1 if you have the <utime.h> header file. */ +#cmakedefine HAVE_UTIME_H ${HAVE_UTIME_H} + +/* Define to 1 if the system has the type `u_int64_t'. */ +#undef HAVE_U_INT64_T + +/* Define to 1 if you have the <windows.h> header file. */ +#cmakedefine HAVE_WINDOWS_H ${HAVE_WINDOWS_H} + +/* Installation directory for binary executables */ +#undef LLVM_BINDIR + +/* Time at which LLVM was configured */ +#undef LLVM_CONFIGTIME + +/* Installation directory for documentation */ +#undef LLVM_DATADIR + +/* Installation directory for config files */ +#undef LLVM_ETCDIR + +/* Host triple we were built on */ +#cmakedefine LLVM_HOSTTRIPLE "${LLVM_HOSTTRIPLE}" + +/* Installation directory for include files */ +#undef LLVM_INCLUDEDIR + +/* Installation directory for .info files */ +#undef LLVM_INFODIR + +/* Installation directory for libraries */ +#undef LLVM_LIBDIR + +/* Installation directory for man pages */ +#undef LLVM_MANDIR + +/* Define if this is Unixish platform */ +#cmakedefine LLVM_ON_UNIX ${LLVM_ON_UNIX} + +/* Define if this is Win32ish platform */ +#cmakedefine LLVM_ON_WIN32 ${LLVM_ON_WIN32} + +/* Added by Kevin -- Maximum path length */ +#cmakedefine MAXPATHLEN ${MAXPATHLEN} + +/* Define to path to dot program if found or 'echo dot' otherwise */ +#undef LLVM_PATH_DOT + +/* Define to path to dotty program if found or 'echo dotty' otherwise */ +#undef LLVM_PATH_DOTTY + +/* Define to path to Graphviz program if found or 'echo Graphviz' otherwise */ +#undef LLVM_PATH_GRAPHVIZ + +/* Define to path to gv program if found or 'echo gv' otherwise */ +#undef LLVM_PATH_GV + +/* Installation prefix directory */ +#undef LLVM_PREFIX + +/* Define if the OS needs help to load dependent libraries for dlopen(). */ +#cmakedefine LTDL_DLOPEN_DEPLIBS ${LTDL_DLOPEN_DEPLIBS} + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LTDL_OBJDIR + +/* Define to the name of the environment variable that determines the dynamic + library search path. */ +#cmakedefine LTDL_SHLIBPATH_VAR "${LTDL_SHLIBPATH_VAR}" + +/* Define to the extension used for shared libraries, say, ".so". 
*/ +#cmakedefine LTDL_SHLIB_EXT "${LTDL_SHLIB_EXT}" + +/* Define to the system default library search path. */ +#cmakedefine LTDL_SYSSEARCHPATH "${LTDL_SYSSEARCHPATH}" + +/* Define if /dev/zero should be used when mapping RWX memory, or undefine if + its not necessary */ +#undef NEED_DEV_ZERO_FOR_MMAP + +/* Define if dlsym() requires a leading underscore in symbol names. */ +#undef NEED_USCORE + +/* Define to the address where bug reports for this package should be sent. */ +#cmakedefine PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}" + +/* Define to the full name of this package. */ +#cmakedefine PACKAGE_NAME "${PACKAGE_NAME}" + +/* Define to the full name and version of this package. */ +#cmakedefine PACKAGE_STRING "${PACKAGE_STRING}" + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the version of this package. */ +#cmakedefine PACKAGE_VERSION "${PACKAGE_VERSION}" + +/* Define as the return type of signal handlers (`int' or `void'). */ +#cmakedefine RETSIGTYPE ${RETSIGTYPE} + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +#undef STACK_DIRECTION + +/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */ +#undef STAT_MACROS_BROKEN + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */ +#undef TIME_WITH_SYS_TIME + +/* Define to 1 if your <sys/time.h> declares `struct tm'. */ +#undef TM_IN_SYS_TIME + +/* Define if use udis86 library */ +#undef USE_UDIS86 + +/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a + `char[]'. */ +#undef YYTEXT_POINTER + +/* Define to empty if `const' does not conform to ANSI C. */ +#undef const + +/* Define to a type to use for `error_t' if it is not otherwise available. */ +#cmakedefine error_t ${error_t} + +/* Define to a type to use for `mode_t' if it is not otherwise available. */ +#cmakedefine mode_t ${mode_t} + +/* Define to `int' if <sys/types.h> does not define. */ +#undef pid_t + +/* Define to `unsigned int' if <sys/types.h> does not define. */ +#undef size_t + +/* Define to a function replacing strtoll */ +#cmakedefine strtoll ${strtoll} + +/* Define to a function implementing strtoull */ +#cmakedefine strtoull ${strtoull} + +/* Define to a function implementing stricmp */ +#cmakedefine stricmp ${stricmp} + +/* Define to a function implementing strdup */ +#cmakedefine strdup ${strdup} diff --git a/include/llvm/Config/config.h.in b/include/llvm/Config/config.h.in new file mode 100644 index 0000000..eee2f94 --- /dev/null +++ b/include/llvm/Config/config.h.in @@ -0,0 +1,572 @@ +/* include/llvm/Config/config.h.in. Generated from autoconf/configure.ac by autoheader. */ + +/* Define if dlopen(0) will open the symbols of the program */ +#undef CAN_DLOPEN_SELF + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +#undef CRAY_STACKSEG_END + +/* Define to 1 if using `alloca.c'. 
*/ +#undef C_ALLOCA + +/* Define if CBE is enabled for printf %a output */ +#undef ENABLE_CBE_PRINTF_A + +/* Define if position independent code is enabled */ +#undef ENABLE_PIC + +/* Define if threads enabled */ +#undef ENABLE_THREADS + +/* Define to 1 if you have `alloca', as a function or macro. */ +#undef HAVE_ALLOCA + +/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix). + */ +#undef HAVE_ALLOCA_H + +/* Define to 1 if you have the `argz_append' function. */ +#undef HAVE_ARGZ_APPEND + +/* Define to 1 if you have the `argz_create_sep' function. */ +#undef HAVE_ARGZ_CREATE_SEP + +/* Define to 1 if you have the <argz.h> header file. */ +#undef HAVE_ARGZ_H + +/* Define to 1 if you have the `argz_insert' function. */ +#undef HAVE_ARGZ_INSERT + +/* Define to 1 if you have the `argz_next' function. */ +#undef HAVE_ARGZ_NEXT + +/* Define to 1 if you have the `argz_stringify' function. */ +#undef HAVE_ARGZ_STRINGIFY + +/* Define to 1 if you have the <assert.h> header file. */ +#undef HAVE_ASSERT_H + +/* Define to 1 if you have the `backtrace' function. */ +#undef HAVE_BACKTRACE + +/* Define to 1 if you have the `bcopy' function. */ +#undef HAVE_BCOPY + +/* Does not have bi-directional iterator */ +#undef HAVE_BI_ITERATOR + +/* Define to 1 if you have the `ceilf' function. */ +#undef HAVE_CEILF + +/* Define to 1 if you have the `closedir' function. */ +#undef HAVE_CLOSEDIR + +/* Define to 1 if you have the <ctype.h> header file. */ +#undef HAVE_CTYPE_H + +/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'. + */ +#undef HAVE_DIRENT_H + +/* Define if you have the GNU dld library. */ +#undef HAVE_DLD + +/* Define to 1 if you have the <dld.h> header file. */ +#undef HAVE_DLD_H + +/* Define to 1 if you have the `dlerror' function. */ +#undef HAVE_DLERROR + +/* Define to 1 if you have the <dlfcn.h> header file. */ +#undef HAVE_DLFCN_H + +/* Define if dlopen() is available on this platform. */ +#undef HAVE_DLOPEN + +/* Define to 1 if you have the <dl.h> header file. */ +#undef HAVE_DL_H + +/* Define if the dot program is available */ +#undef HAVE_DOT + +/* Define if the dotty program is available */ +#undef HAVE_DOTTY + +/* Define if you have the _dyld_func_lookup function. */ +#undef HAVE_DYLD + +/* Define to 1 if you have the <errno.h> header file. */ +#undef HAVE_ERRNO_H + +/* Define to 1 if the system has the type `error_t'. */ +#undef HAVE_ERROR_T + +/* Define to 1 if you have the <execinfo.h> header file. */ +#undef HAVE_EXECINFO_H + +/* Define to 1 if you have the <fcntl.h> header file. */ +#undef HAVE_FCNTL_H + +/* Define if libffi is available on this platform. */ +#undef HAVE_FFI_CALL + +/* Define to 1 if you have the <ffi/ffi.h> header file. */ +#undef HAVE_FFI_FFI_H + +/* Define to 1 if you have the <ffi.h> header file. */ +#undef HAVE_FFI_H + +/* Set to 1 if the finite function is found in <ieeefp.h> */ +#undef HAVE_FINITE_IN_IEEEFP_H + +/* Define to 1 if you have the `floorf' function. */ +#undef HAVE_FLOORF + +/* Define to 1 if you have the `fmodf' function. */ +#undef HAVE_FMODF + +/* Does not have forward iterator */ +#undef HAVE_FWD_ITERATOR + +/* Define to 1 if you have the `getcwd' function. */ +#undef HAVE_GETCWD + +/* Define to 1 if you have the `getpagesize' function. */ +#undef HAVE_GETPAGESIZE + +/* Define to 1 if you have the `getrlimit' function. */ +#undef HAVE_GETRLIMIT + +/* Define to 1 if you have the `getrusage' function. */ +#undef HAVE_GETRUSAGE + +/* Define to 1 if you have the `gettimeofday' function. 
*/ +#undef HAVE_GETTIMEOFDAY + +/* Define if the Graphviz program is available */ +#undef HAVE_GRAPHVIZ + +/* Define if the gv program is available */ +#undef HAVE_GV + +/* Define to 1 if you have the `index' function. */ +#undef HAVE_INDEX + +/* Define to 1 if the system has the type `int64_t'. */ +#undef HAVE_INT64_T + +/* Define to 1 if you have the <inttypes.h> header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `isatty' function. */ +#undef HAVE_ISATTY + +/* Set to 1 if the isinf function is found in <cmath> */ +#undef HAVE_ISINF_IN_CMATH + +/* Set to 1 if the isinf function is found in <math.h> */ +#undef HAVE_ISINF_IN_MATH_H + +/* Set to 1 if the isnan function is found in <cmath> */ +#undef HAVE_ISNAN_IN_CMATH + +/* Set to 1 if the isnan function is found in <math.h> */ +#undef HAVE_ISNAN_IN_MATH_H + +/* Define if you have the libdl library or equivalent. */ +#undef HAVE_LIBDL + +/* Define to 1 if you have the `elf' library (-lelf). */ +#undef HAVE_LIBELF + +/* Define to 1 if you have the `imagehlp' library (-limagehlp). */ +#undef HAVE_LIBIMAGEHLP + +/* Define to 1 if you have the `m' library (-lm). */ +#undef HAVE_LIBM + +/* Define to 1 if you have the `psapi' library (-lpsapi). */ +#undef HAVE_LIBPSAPI + +/* Define to 1 if you have the `pthread' library (-lpthread). */ +#undef HAVE_LIBPTHREAD + +/* Define to 1 if you have the `udis86' library (-ludis86). */ +#undef HAVE_LIBUDIS86 + +/* Define to 1 if you have the <limits.h> header file. */ +#undef HAVE_LIMITS_H + +/* Define if you can use -Wl,-export-dynamic. */ +#undef HAVE_LINK_EXPORT_DYNAMIC + +/* Define to 1 if you have the <link.h> header file. */ +#undef HAVE_LINK_H + +/* Define if you can use -Wl,-R. to pass -R. to the linker, in order to add + the current directory to the dynamic linker search path. */ +#undef HAVE_LINK_R + +/* Define to 1 if you have the `longjmp' function. */ +#undef HAVE_LONGJMP + +/* Define to 1 if you have the <mach/mach.h> header file. */ +#undef HAVE_MACH_MACH_H + +/* Define to 1 if you have the <mach-o/dyld.h> header file. */ +#undef HAVE_MACH_O_DYLD_H + +/* Define if mallinfo() is available on this platform. */ +#undef HAVE_MALLINFO + +/* Define to 1 if you have the <malloc.h> header file. */ +#undef HAVE_MALLOC_H + +/* Define to 1 if you have the <malloc/malloc.h> header file. */ +#undef HAVE_MALLOC_MALLOC_H + +/* Define to 1 if you have the `malloc_zone_statistics' function. */ +#undef HAVE_MALLOC_ZONE_STATISTICS + +/* Define to 1 if you have the `memcpy' function. */ +#undef HAVE_MEMCPY + +/* Define to 1 if you have the `memmove' function. */ +#undef HAVE_MEMMOVE + +/* Define to 1 if you have the <memory.h> header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `mkdtemp' function. */ +#undef HAVE_MKDTEMP + +/* Define to 1 if you have the `mkstemp' function. */ +#undef HAVE_MKSTEMP + +/* Define to 1 if you have the `mktemp' function. */ +#undef HAVE_MKTEMP + +/* Define to 1 if you have a working `mmap' system call. */ +#undef HAVE_MMAP + +/* Define if mmap() uses MAP_ANONYMOUS to map anonymous pages, or undefine if + it uses MAP_ANON */ +#undef HAVE_MMAP_ANONYMOUS + +/* Define if mmap() can map files into memory */ +#undef HAVE_MMAP_FILE + +/* define if the compiler implements namespaces */ +#undef HAVE_NAMESPACES + +/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */ +#undef HAVE_NDIR_H + +/* Define to 1 if you have the `nearbyintf' function. */ +#undef HAVE_NEARBYINTF + +/* Define to 1 if you have the `opendir' function. 
*/ +#undef HAVE_OPENDIR + +/* Define to 1 if you have the `powf' function. */ +#undef HAVE_POWF + +/* Define if libtool can extract symbol lists from object files. */ +#undef HAVE_PRELOADED_SYMBOLS + +/* Define to have the %a format string */ +#undef HAVE_PRINTF_A + +/* Define to 1 if you have the <pthread.h> header file. */ +#undef HAVE_PTHREAD_H + +/* Have pthread_mutex_lock */ +#undef HAVE_PTHREAD_MUTEX_LOCK + +/* Define to 1 if srand48/lrand48/drand48 exist in <stdlib.h> */ +#undef HAVE_RAND48 + +/* Define to 1 if you have the `readdir' function. */ +#undef HAVE_READDIR + +/* Define to 1 if you have the `realpath' function. */ +#undef HAVE_REALPATH + +/* Define to 1 if you have the `rindex' function. */ +#undef HAVE_RINDEX + +/* Define to 1 if you have the `rintf' function. */ +#undef HAVE_RINTF + +/* Define to 1 if you have the `round' function. */ +#undef HAVE_ROUND + +/* Define to 1 if you have the `roundf' function. */ +#undef HAVE_ROUNDF + +/* Define to 1 if you have the `sbrk' function. */ +#undef HAVE_SBRK + +/* Define to 1 if you have the `setjmp' function. */ +#undef HAVE_SETJMP + +/* Define to 1 if you have the <setjmp.h> header file. */ +#undef HAVE_SETJMP_H + +/* Define to 1 if you have the `setrlimit' function. */ +#undef HAVE_SETRLIMIT + +/* Define if you have the shl_load function. */ +#undef HAVE_SHL_LOAD + +/* Define to 1 if you have the `siglongjmp' function. */ +#undef HAVE_SIGLONGJMP + +/* Define to 1 if you have the <signal.h> header file. */ +#undef HAVE_SIGNAL_H + +/* Define to 1 if you have the `sigsetjmp' function. */ +#undef HAVE_SIGSETJMP + +/* Define to 1 if you have the <stdint.h> header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the <stdio.h> header file. */ +#undef HAVE_STDIO_H + +/* Define to 1 if you have the <stdlib.h> header file. */ +#undef HAVE_STDLIB_H + +/* Set to 1 if the std::isinf function is found in <cmath> */ +#undef HAVE_STD_ISINF_IN_CMATH + +/* Set to 1 if the std::isnan function is found in <cmath> */ +#undef HAVE_STD_ISNAN_IN_CMATH + +/* Does not have std namespace iterator */ +#undef HAVE_STD_ITERATOR + +/* Define to 1 if you have the `strchr' function. */ +#undef HAVE_STRCHR + +/* Define to 1 if you have the `strcmp' function. */ +#undef HAVE_STRCMP + +/* Define to 1 if you have the `strdup' function. */ +#undef HAVE_STRDUP + +/* Define to 1 if you have the `strerror' function. */ +#undef HAVE_STRERROR + +/* Define to 1 if you have the `strerror_r' function. */ +#undef HAVE_STRERROR_R + +/* Define to 1 if you have the <strings.h> header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the <string.h> header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the `strrchr' function. */ +#undef HAVE_STRRCHR + +/* Define to 1 if you have the `strtof' function. */ +#undef HAVE_STRTOF + +/* Define to 1 if you have the `strtoll' function. */ +#undef HAVE_STRTOLL + +/* Define to 1 if you have the `strtoq' function. */ +#undef HAVE_STRTOQ + +/* Define to 1 if you have the `sysconf' function. */ +#undef HAVE_SYSCONF + +/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'. + */ +#undef HAVE_SYS_DIR_H + +/* Define to 1 if you have the <sys/dl.h> header file. */ +#undef HAVE_SYS_DL_H + +/* Define to 1 if you have the <sys/ioctl.h> header file. */ +#undef HAVE_SYS_IOCTL_H + +/* Define to 1 if you have the <sys/mman.h> header file. */ +#undef HAVE_SYS_MMAN_H + +/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'. 
+ */ +#undef HAVE_SYS_NDIR_H + +/* Define to 1 if you have the <sys/param.h> header file. */ +#undef HAVE_SYS_PARAM_H + +/* Define to 1 if you have the <sys/resource.h> header file. */ +#undef HAVE_SYS_RESOURCE_H + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the <sys/time.h> header file. */ +#undef HAVE_SYS_TIME_H + +/* Define to 1 if you have the <sys/types.h> header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */ +#undef HAVE_SYS_WAIT_H + +/* Define to 1 if you have the <termios.h> header file. */ +#undef HAVE_TERMIOS_H + +/* Define to 1 if the system has the type `uint64_t'. */ +#undef HAVE_UINT64_T + +/* Define to 1 if you have the <unistd.h> header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the <utime.h> header file. */ +#undef HAVE_UTIME_H + +/* Define to 1 if the system has the type `u_int64_t'. */ +#undef HAVE_U_INT64_T + +/* Define to 1 if you have the <windows.h> header file. */ +#undef HAVE_WINDOWS_H + +/* Define to 1 if you have the `__dso_handle' function. */ +#undef HAVE___DSO_HANDLE + +/* Installation directory for binary executables */ +#undef LLVM_BINDIR + +/* Time at which LLVM was configured */ +#undef LLVM_CONFIGTIME + +/* Installation directory for data files */ +#undef LLVM_DATADIR + +/* Installation directory for documentation */ +#undef LLVM_DOCSDIR + +/* Installation directory for config files */ +#undef LLVM_ETCDIR + +/* Host triple we were built on */ +#undef LLVM_HOSTTRIPLE + +/* Installation directory for include files */ +#undef LLVM_INCLUDEDIR + +/* Installation directory for .info files */ +#undef LLVM_INFODIR + +/* Installation directory for libraries */ +#undef LLVM_LIBDIR + +/* Installation directory for man pages */ +#undef LLVM_MANDIR + +/* Build multithreading support into LLVM */ +#undef LLVM_MULTITHREADED + +/* Define if this is Unixish platform */ +#undef LLVM_ON_UNIX + +/* Define if this is Win32ish platform */ +#undef LLVM_ON_WIN32 + +/* Define to path to dot program if found or 'echo dot' otherwise */ +#undef LLVM_PATH_DOT + +/* Define to path to dotty program if found or 'echo dotty' otherwise */ +#undef LLVM_PATH_DOTTY + +/* Define to path to Graphviz program if found or 'echo Graphviz' otherwise */ +#undef LLVM_PATH_GRAPHVIZ + +/* Define to path to gv program if found or 'echo gv' otherwise */ +#undef LLVM_PATH_GV + +/* Installation prefix directory */ +#undef LLVM_PREFIX + +/* Define if the OS needs help to load dependent libraries for dlopen(). */ +#undef LTDL_DLOPEN_DEPLIBS + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LTDL_OBJDIR + +/* Define to the name of the environment variable that determines the dynamic + library search path. */ +#undef LTDL_SHLIBPATH_VAR + +/* Define to the extension used for shared libraries, say, ".so". */ +#undef LTDL_SHLIB_EXT + +/* Define to the system default library search path. */ +#undef LTDL_SYSSEARCHPATH + +/* Define if /dev/zero should be used when mapping RWX memory, or undefine if + its not necessary */ +#undef NEED_DEV_ZERO_FOR_MMAP + +/* Define if dlsym() requires a leading underscore in symbol names. */ +#undef NEED_USCORE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. 
*/ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define as the return type of signal handlers (`int' or `void'). */ +#undef RETSIGTYPE + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +#undef STACK_DIRECTION + +/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */ +#undef STAT_MACROS_BROKEN + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */ +#undef TIME_WITH_SYS_TIME + +/* Define to 1 if your <sys/time.h> declares `struct tm'. */ +#undef TM_IN_SYS_TIME + +/* Define if use udis86 library */ +#undef USE_UDIS86 + +/* Define to empty if `const' does not conform to ANSI C. */ +#undef const + +/* Define to a type to use for `error_t' if it is not otherwise available. */ +#undef error_t + +/* Define to `int' if <sys/types.h> does not define. */ +#undef pid_t + +/* Define to `unsigned int' if <sys/types.h> does not define. */ +#undef size_t diff --git a/include/llvm/Constant.h b/include/llvm/Constant.h new file mode 100644 index 0000000..d4949d1 --- /dev/null +++ b/include/llvm/Constant.h @@ -0,0 +1,142 @@ +//===-- llvm/Constant.h - Constant class definition -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the Constant class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CONSTANT_H +#define LLVM_CONSTANT_H + +#include "llvm/User.h" + +namespace llvm { + template<typename T> class SmallVectorImpl; + + /// If object contains references to other objects, then relocations are + /// usually required for emission of such object (especially in PIC mode). One + /// usually distinguishes local and global relocations. Local relocations are + /// made wrt objects in the same module and these objects have local (internal + /// or private) linkage. Global relocations are made wrt externally visible + /// objects. In most cases local relocations can be resolved via so-called + /// 'pre-link' technique. + namespace Reloc { + const unsigned None = 0; + const unsigned Local = 1 << 0; ///< Local relocations are required + const unsigned Global = 1 << 1; ///< Global relocations are required + const unsigned LocalOrGlobal = Local | Global; + } + +/// This is an important base class in LLVM. It provides the common facilities +/// of all constant values in an LLVM program. A constant is a value that is +/// immutable at runtime. Functions are constants because their address is +/// immutable. Same with global variables. +/// +/// All constants share the capabilities provided in this class. All constants +/// can have a null value. They can have an operand list. 
Constants can be +/// simple (integer and floating point values), complex (arrays and structures), +/// or expression based (computations yielding a constant value composed of +/// only certain operators and other constant values). +/// +/// Note that Constants are immutable (once created they never change) +/// and are fully shared by structural equivalence. This means that two +/// structurally equivalent constants will always have the same address. +/// Constants are created on demand as needed and never deleted: thus clients +/// don't have to worry about the lifetime of the objects. +/// @brief LLVM Constant Representation +class Constant : public User { + void operator=(const Constant &); // Do not implement + Constant(const Constant &); // Do not implement +protected: + Constant(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps) + : User(ty, vty, Ops, NumOps) {} + + void destroyConstantImpl(); +public: + /// Static constructor to get a '0' constant of arbitrary type... + /// + static Constant *getNullValue(const Type *Ty); + + /// Static constructor to get a '-1' constant. This supports integers and + /// vectors. + /// + static Constant *getAllOnesValue(const Type *Ty); + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. + virtual bool isNullValue() const = 0; + + /// canTrap - Return true if evaluation of this constant could trap. This is + /// true for things like constant expressions that could divide by zero. + bool canTrap() const; + + /// ContainsRelocations - Return true if the constant value contains + /// relocations which cannot be resolved at compile time. Note that answer is + /// not exclusive: there can be possibility that relocations of other kind are + /// required as well. + bool ContainsRelocations(unsigned Kind = Reloc::LocalOrGlobal) const; + + // Specialize get/setOperand for Constants as their operands are always + // constants as well. + Constant *getOperand(unsigned i) { + return static_cast<Constant*>(User::getOperand(i)); + } + const Constant *getOperand(unsigned i) const { + return static_cast<const Constant*>(User::getOperand(i)); + } + void setOperand(unsigned i, Constant *C) { + User::setOperand(i, C); + } + + /// getVectorElements - This method, which is only valid on constant of vector + /// type, returns the elements of the vector in the specified smallvector. + /// This handles breaking down a vector undef into undef elements, etc. For + /// constant exprs and other cases we can't handle, we return an empty vector. + void getVectorElements(SmallVectorImpl<Constant*> &Elts) const; + + /// destroyConstant - Called if some element of this constant is no longer + /// valid. At this point only other constants may be on the use_list for this + /// constant. Any constants on our Use list must also be destroy'd. The + /// implementation must be sure to remove the constant from the list of + /// available cached constants. Implementations should call + /// destroyConstantImpl as the last thing they do, to destroy all users and + /// delete this. 
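
As a brief illustrative aside (editorial, not part of the header): the uniquing and null-value behaviour documented above can be observed directly from client code. The sketch assumes this tree still exposes the pre-LLVMContext built-in types such as Type::Int32Ty.

  #include "llvm/Constant.h"
  #include "llvm/Type.h"
  #include <cassert>

  void constantBasics() {
    using namespace llvm;
    // getNullValue returns the all-zeros constant of the given type.
    Constant *Zero = Constant::getNullValue(Type::Int32Ty);
    assert(Zero->isNullValue());

    // Constants are uniqued by structural equivalence, so asking again for
    // the same value yields the very same object, not a copy.
    assert(Zero == Constant::getNullValue(Type::Int32Ty));

    // A plain integer constant cannot trap and needs no relocations.
    assert(!Zero->canTrap());
    assert(!Zero->ContainsRelocations());
  }
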
+ virtual void destroyConstant() { assert(0 && "Not reached!"); } + + //// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const Constant *) { return true; } + static inline bool classof(const GlobalValue *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() >= ConstantFirstVal && + V->getValueID() <= ConstantLastVal; + } + + /// replaceUsesOfWithOnConstant - This method is a special form of + /// User::replaceUsesOfWith (which does not work on constants) that does work + /// on constants. Basically this method goes through the trouble of building + /// a new constant that is equivalent to the current one, with all uses of + /// From replaced with uses of To. After this construction is completed, all + /// of the users of 'this' are replaced to use the new constant, and then + /// 'this' is deleted. In general, you should not call this method, instead, + /// use Value::replaceAllUsesWith, which automatically dispatches to this + /// method as needed. + /// + virtual void replaceUsesOfWithOnConstant(Value *, Value *, Use *) { + // Provide a default implementation for constants (like integers) that + // cannot use any other values. This cannot be called at runtime, but needs + // to be here to avoid link errors. + assert(getNumOperands() == 0 && "replaceUsesOfWithOnConstant must be " + "implemented for all constants that have operands!"); + assert(0 && "Constants that do not have operands cannot be using 'From'!"); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Constants.h b/include/llvm/Constants.h new file mode 100644 index 0000000..9e95a08 --- /dev/null +++ b/include/llvm/Constants.h @@ -0,0 +1,887 @@ +//===-- llvm/Constants.h - Constant class subclass definitions --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// @file +/// This file contains the declarations for the subclasses of Constant, +/// which represent the different flavors of constant values that live in LLVM. +/// Note that Constants are immutable (once created they never change) and are +/// fully shared by structural equivalence. This means that two structurally +/// equivalent constants will always have the same address. Constant's are +/// created on demand as needed and never deleted: thus clients don't have to +/// worry about the lifetime of the objects. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CONSTANTS_H +#define LLVM_CONSTANTS_H + +#include "llvm/Constant.h" +#include "llvm/Type.h" +#include "llvm/OperandTraits.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/SmallVector.h" + +namespace llvm { + +class ArrayType; +class StructType; +class PointerType; +class VectorType; + +template<class ConstantClass, class TypeClass, class ValType> +struct ConstantCreator; +template<class ConstantClass, class TypeClass> +struct ConvertConstantType; + +//===----------------------------------------------------------------------===// +/// This is the shared class of boolean and integer constants. This class +/// represents both boolean and integral constants. +/// @brief Class for constant integers. 
+class ConstantInt : public Constant { + static ConstantInt *TheTrueVal, *TheFalseVal; + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT + ConstantInt(const IntegerType *Ty, const APInt& V); + APInt Val; +protected: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + /// Return the constant as an APInt value reference. This allows clients to + /// obtain a copy of the value, with all its precision in tact. + /// @brief Return the constant's value. + inline const APInt& getValue() const { + return Val; + } + + /// getBitWidth - Return the bitwidth of this constant. + unsigned getBitWidth() const { return Val.getBitWidth(); } + + /// Return the constant as a 64-bit unsigned integer value after it + /// has been zero extended as appropriate for the type of this constant. Note + /// that this method can assert if the value does not fit in 64 bits. + /// @deprecated + /// @brief Return the zero extended value. + inline uint64_t getZExtValue() const { + return Val.getZExtValue(); + } + + /// Return the constant as a 64-bit integer value after it has been sign + /// extended as appropriate for the type of this constant. Note that + /// this method can assert if the value does not fit in 64 bits. + /// @deprecated + /// @brief Return the sign extended value. + inline int64_t getSExtValue() const { + return Val.getSExtValue(); + } + + /// A helper method that can be used to determine if the constant contained + /// within is equal to a constant. This only works for very small values, + /// because this is all that can be represented with all types. + /// @brief Determine if this constant's value is same as an unsigned char. + bool equalsInt(uint64_t V) const { + return Val == V; + } + + /// getTrue/getFalse - Return the singleton true/false values. + static inline ConstantInt *getTrue() { + if (TheTrueVal) return TheTrueVal; + return CreateTrueFalseVals(true); + } + static inline ConstantInt *getFalse() { + if (TheFalseVal) return TheFalseVal; + return CreateTrueFalseVals(false); + } + + /// Return a ConstantInt with the specified value for the specified type. The + /// value V will be canonicalized to an unsigned APInt. Accessing it with + /// either getSExtValue() or getZExtValue() will yield a correctly sized and + /// signed value for the type Ty. + /// @brief Get a ConstantInt for a specific value. + static ConstantInt *get(const Type *Ty, uint64_t V, bool isSigned = false); + + /// Return a ConstantInt with the specified value for the specified type. The + /// value V will be canonicalized to a an unsigned APInt. Accessing it with + /// either getSExtValue() or getZExtValue() will yield a correctly sized and + /// signed value for the type Ty. + /// @brief Get a ConstantInt for a specific signed value. + static ConstantInt *getSigned(const Type *Ty, int64_t V) { + return get(Ty, V, true); + } + + /// Return a ConstantInt with the specified value and an implied Type. The + /// type is the integer type that corresponds to the bit width of the value. + static ConstantInt *get(const APInt &V); + + /// getType - Specialize the getType() method to always return an IntegerType, + /// which reduces the amount of casting needed in parts of the compiler. 
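+  // A usage sketch for the factories above (illustrative only; Type::Int32Ty
+  // is assumed to be the built-in 32-bit integer type):
+  //
+  //   ConstantInt *Answer   = ConstantInt::get(Type::Int32Ty, 42);
+  //   ConstantInt *MinusOne = ConstantInt::getSigned(Type::Int32Ty, -1);
+  //   ConstantInt *Wide     = ConstantInt::get(APInt(128, 1)); // implies i128
+  //   assert(Answer->equalsInt(42) && Answer->getZExtValue() == 42);
+  //   assert(MinusOne->getSExtValue() == -1);
+  //   assert(Wide->getBitWidth() == 128);
+  //   ConstantInt *True = ConstantInt::getTrue();  // singleton i1 true
+  //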
+ /// + inline const IntegerType *getType() const { + return reinterpret_cast<const IntegerType*>(Value::getType()); + } + + /// This static method returns true if the type Ty is big enough to + /// represent the value V. This can be used to avoid having the get method + /// assert when V is larger than Ty can represent. Note that there are two + /// versions of this method, one for unsigned and one for signed integers. + /// Although ConstantInt canonicalizes everything to an unsigned integer, + /// the signed version avoids callers having to convert a signed quantity + /// to the appropriate unsigned type before calling the method. + /// @returns true if V is a valid value for type Ty + /// @brief Determine if the value is in range for the given type. + static bool isValueValidForType(const Type *Ty, uint64_t V); + static bool isValueValidForType(const Type *Ty, int64_t V); + + /// This function will return true iff this constant represents the "null" + /// value that would be returned by the getNullValue method. + /// @returns true if this is the null integer value. + /// @brief Determine if the value is null. + virtual bool isNullValue() const { + return Val == 0; + } + + /// This is just a convenience method to make client code smaller for a + /// common code. It also correctly performs the comparison without the + /// potential for an assertion from getZExtValue(). + bool isZero() const { + return Val == 0; + } + + /// This is just a convenience method to make client code smaller for a + /// common case. It also correctly performs the comparison without the + /// potential for an assertion from getZExtValue(). + /// @brief Determine if the value is one. + bool isOne() const { + return Val == 1; + } + + /// This function will return true iff every bit in this constant is set + /// to true. + /// @returns true iff this constant's bits are all set to true. + /// @brief Determine if the value is all ones. + bool isAllOnesValue() const { + return Val.isAllOnesValue(); + } + + /// This function will return true iff this constant represents the largest + /// value that may be represented by the constant's type. + /// @returns true iff this is the largest value that may be represented + /// by this type. + /// @brief Determine if the value is maximal. + bool isMaxValue(bool isSigned) const { + if (isSigned) + return Val.isMaxSignedValue(); + else + return Val.isMaxValue(); + } + + /// This function will return true iff this constant represents the smallest + /// value that may be represented by this constant's type. + /// @returns true if this is the smallest value that may be represented by + /// this type. + /// @brief Determine if the value is minimal. + bool isMinValue(bool isSigned) const { + if (isSigned) + return Val.isMinSignedValue(); + else + return Val.isMinValue(); + } + + /// This function will return true iff this constant represents a value with + /// active bits bigger than 64 bits or a value greater than the given uint64_t + /// value. + /// @returns true iff this constant is greater or equal to the given number. + /// @brief Determine if the value is greater or equal to the given number. + bool uge(uint64_t Num) { + return Val.getActiveBits() > 64 || Val.getZExtValue() >= Num; + } + + /// getLimitedValue - If the value is smaller than the specified limit, + /// return it, otherwise return the limit value. This causes the value + /// to saturate to the limit. 
+ /// @returns the min of the value of the constant and the specified value + /// @brief Get the constant's value with a saturation limit + uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const { + return Val.getLimitedValue(Limit); + } + + /// @returns the value for an integer constant of the given type that has all + /// its bits set to true. + /// @brief Get the all ones value + static ConstantInt *getAllOnesValue(const Type *Ty); + + /// @brief Methods to support type inquiry through isa, cast, and dyn_cast. + static inline bool classof(const ConstantInt *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantIntVal; + } + static void ResetTrueFalse() { TheTrueVal = TheFalseVal = 0; } +private: + static ConstantInt *CreateTrueFalseVals(bool WhichOne); +}; + + +//===----------------------------------------------------------------------===// +/// ConstantFP - Floating Point Values [float, double] +/// +class ConstantFP : public Constant { + APFloat Val; + void *operator new(size_t, unsigned);// DO NOT IMPLEMENT + ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT +protected: + ConstantFP(const Type *Ty, const APFloat& V); +protected: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + /// get() - Static factory methods - Return objects of the specified value + static ConstantFP *get(const APFloat &V); + + /// get() - This returns a constant fp for the specified value in the + /// specified type. This should only be used for simple constant values like + /// 2.0/1.0 etc, that are known-valid both as double and as the target format. + static ConstantFP *get(const Type *Ty, double V); + + /// isValueValidForType - return true if Ty is big enough to represent V. + static bool isValueValidForType(const Type *Ty, const APFloat& V); + inline const APFloat& getValueAPF() const { return Val; } + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. Don't depend on == for doubles to tell us it's zero, it + /// considers -0.0 to be null as well as 0.0. :( + virtual bool isNullValue() const; + + // Get a negative zero. + static ConstantFP *getNegativeZero(const Type* Ty); + + /// isExactlyValue - We don't rely on operator== working on double values, as + /// it returns true for things that are clearly not equal, like -0.0 and 0.0. + /// As such, this method can be used to do an exact bit-for-bit comparison of + /// two floating point values. The version with a double operand is retained + /// because it's so convenient to write isExactlyValue(2.0), but please use + /// it only for simple constants. 
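+  // A usage sketch (illustrative only; Type::DoubleTy and Type::FloatTy are
+  // assumed to be the built-in floating point types):
+  //
+  //   ConstantFP *Two     = ConstantFP::get(Type::DoubleTy, 2.0);
+  //   ConstantFP *NegZero = ConstantFP::getNegativeZero(Type::FloatTy);
+  //   assert(Two->isExactlyValue(2.0));   // bit-for-bit comparison
+  //   assert(NegZero->isNullValue());     // -0.0 is considered null too
+  //   const APFloat &V = Two->getValueAPF();
+  //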
+ bool isExactlyValue(const APFloat& V) const; + + bool isExactlyValue(double V) const { + bool ignored; + // convert is not supported on this type + if (&Val.getSemantics() == &APFloat::PPCDoubleDouble) + return false; + APFloat FV(V); + FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored); + return isExactlyValue(FV); + } + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ConstantFP *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantFPVal; + } +}; + +//===----------------------------------------------------------------------===// +/// ConstantAggregateZero - All zero aggregate value +/// +class ConstantAggregateZero : public Constant { + friend struct ConstantCreator<ConstantAggregateZero, Type, char>; + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT +protected: + explicit ConstantAggregateZero(const Type *ty) + : Constant(ty, ConstantAggregateZeroVal, 0, 0) {} +protected: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + /// get() - static factory method for creating a null aggregate. It is + /// illegal to call this method with a non-aggregate type. + static ConstantAggregateZero *get(const Type *Ty); + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. + virtual bool isNullValue() const { return true; } + + virtual void destroyConstant(); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + /// + static bool classof(const ConstantAggregateZero *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantAggregateZeroVal; + } +}; + + +//===----------------------------------------------------------------------===// +/// ConstantArray - Constant Array Declarations +/// +class ConstantArray : public Constant { + friend struct ConstantCreator<ConstantArray, ArrayType, + std::vector<Constant*> >; + ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT +protected: + ConstantArray(const ArrayType *T, const std::vector<Constant*> &Val); +public: + /// get() - Static factory methods - Return objects of the specified value + static Constant *get(const ArrayType *T, const std::vector<Constant*> &); + static Constant *get(const ArrayType *T, + Constant*const*Vals, unsigned NumVals) { + // FIXME: make this the primary ctor method. + return get(T, std::vector<Constant*>(Vals, Vals+NumVals)); + } + + /// This method constructs a ConstantArray and initializes it with a text + /// string. The default behavior (AddNull==true) causes a null terminator to + /// be placed at the end of the array. This effectively increases the length + /// of the array by one (you've been warned). However, in some situations + /// this is not desired so if AddNull==false then the string is copied without + /// null termination. + static Constant *get(const std::string &Initializer, bool AddNull = true); + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant); + + /// getType - Specialize the getType() method to always return an ArrayType, + /// which reduces the amount of casting needed in parts of the compiler. 
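+  // A usage sketch of the string factory above (illustrative only); see the
+  // isString()/isCString() queries declared below:
+  //
+  //   Constant *Str = ConstantArray::get("hello");        // [6 x i8], NUL added
+  //   Constant *Raw = ConstantArray::get("hello", false); // [5 x i8], no NUL
+  //   if (ConstantArray *CA = dyn_cast<ConstantArray>(Str)) {
+  //     assert(CA->isString() && CA->isCString());
+  //     std::string S = CA->getAsString(); // back to a std::string
+  //   }
+  //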
+ /// + inline const ArrayType *getType() const { + return reinterpret_cast<const ArrayType*>(Value::getType()); + } + + /// isString - This method returns true if the array is an array of i8 and + /// the elements of the array are all ConstantInt's. + bool isString() const; + + /// isCString - This method returns true if the array is a string (see + /// @verbatim + /// isString) and it ends in a null byte \0 and does not contains any other + /// @endverbatim + /// null bytes except its terminator. + bool isCString() const; + + /// getAsString - If this array is isString(), then this method converts the + /// array to an std::string and returns it. Otherwise, it asserts out. + /// + std::string getAsString() const; + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. This always returns false because zero arrays are always + /// created as ConstantAggregateZero objects. + virtual bool isNullValue() const { return false; } + + virtual void destroyConstant(); + virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ConstantArray *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantArrayVal; + } +}; + +template <> +struct OperandTraits<ConstantArray> : VariadicOperandTraits<> { +}; + +DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantArray, Constant) + +//===----------------------------------------------------------------------===// +// ConstantStruct - Constant Struct Declarations +// +class ConstantStruct : public Constant { + friend struct ConstantCreator<ConstantStruct, StructType, + std::vector<Constant*> >; + ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT +protected: + ConstantStruct(const StructType *T, const std::vector<Constant*> &Val); +public: + /// get() - Static factory methods - Return objects of the specified value + /// + static Constant *get(const StructType *T, const std::vector<Constant*> &V); + static Constant *get(const std::vector<Constant*> &V, bool Packed = false); + static Constant *get(Constant*const* Vals, unsigned NumVals, + bool Packed = false) { + // FIXME: make this the primary ctor method. + return get(std::vector<Constant*>(Vals, Vals+NumVals), Packed); + } + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant); + + /// getType() specialization - Reduce amount of casting... + /// + inline const StructType *getType() const { + return reinterpret_cast<const StructType*>(Value::getType()); + } + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. This always returns false because zero structs are always + /// created as ConstantAggregateZero objects. 
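+  // A usage sketch for the factories above (illustrative only; Type::Int32Ty
+  // is assumed to be the built-in 32-bit integer type):
+  //
+  //   std::vector<Constant*> Fields;
+  //   Fields.push_back(ConstantInt::get(Type::Int32Ty, 1));
+  //   Fields.push_back(ConstantInt::get(Type::Int32Ty, 2));
+  //   Constant *S = ConstantStruct::get(Fields);       // {i32, i32}
+  //   Constant *P = ConstantStruct::get(Fields, true); // packed <{i32, i32}>
+  //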
+ virtual bool isNullValue() const { + return false; + } + + virtual void destroyConstant(); + virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ConstantStruct *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantStructVal; + } +}; + +template <> +struct OperandTraits<ConstantStruct> : VariadicOperandTraits<> { +}; + +DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantStruct, Constant) + +//===----------------------------------------------------------------------===// +/// ConstantVector - Constant Vector Declarations +/// +class ConstantVector : public Constant { + friend struct ConstantCreator<ConstantVector, VectorType, + std::vector<Constant*> >; + ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT +protected: + ConstantVector(const VectorType *T, const std::vector<Constant*> &Val); +public: + /// get() - Static factory methods - Return objects of the specified value + static Constant *get(const VectorType *T, const std::vector<Constant*> &); + static Constant *get(const std::vector<Constant*> &V); + static Constant *get(Constant*const* Vals, unsigned NumVals) { + // FIXME: make this the primary ctor method. + return get(std::vector<Constant*>(Vals, Vals+NumVals)); + } + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant); + + /// getType - Specialize the getType() method to always return a VectorType, + /// which reduces the amount of casting needed in parts of the compiler. + /// + inline const VectorType *getType() const { + return reinterpret_cast<const VectorType*>(Value::getType()); + } + + /// @returns the value for a vector integer constant of the given type that + /// has all its bits set to true. + /// @brief Get the all ones value + static ConstantVector *getAllOnesValue(const VectorType *Ty); + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. This always returns false because zero vectors are always + /// created as ConstantAggregateZero objects. + virtual bool isNullValue() const { return false; } + + /// This function will return true iff every element in this vector constant + /// is set to all ones. + /// @returns true iff this constant's emements are all set to all ones. + /// @brief Determine if the value is all ones. + bool isAllOnesValue() const; + + /// getSplatValue - If this is a splat constant, meaning that all of the + /// elements have the same value, return that value. Otherwise return NULL. 
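+  // A usage sketch (illustrative only; Type::Int32Ty is assumed). All four
+  // lanes hold the same value, so the getSplatValue() accessor declared just
+  // below would hand back that element:
+  //
+  //   std::vector<Constant*> Lanes(4, ConstantInt::get(Type::Int32Ty, 7));
+  //   Constant *Splat = ConstantVector::get(Lanes);   // <4 x i32> of 7s
+  //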
+ Constant *getSplatValue(); + + virtual void destroyConstant(); + virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ConstantVector *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantVectorVal; + } +}; + +template <> +struct OperandTraits<ConstantVector> : VariadicOperandTraits<> { +}; + +DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantVector, Constant) + +//===----------------------------------------------------------------------===// +/// ConstantPointerNull - a constant pointer value that points to null +/// +class ConstantPointerNull : public Constant { + friend struct ConstantCreator<ConstantPointerNull, PointerType, char>; + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT +protected: + explicit ConstantPointerNull(const PointerType *T) + : Constant(reinterpret_cast<const Type*>(T), + Value::ConstantPointerNullVal, 0, 0) {} + +protected: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + /// get() - Static factory methods - Return objects of the specified value + static ConstantPointerNull *get(const PointerType *T); + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. + virtual bool isNullValue() const { return true; } + + virtual void destroyConstant(); + + /// getType - Specialize the getType() method to always return an PointerType, + /// which reduces the amount of casting needed in parts of the compiler. + /// + inline const PointerType *getType() const { + return reinterpret_cast<const PointerType*>(Value::getType()); + } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ConstantPointerNull *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantPointerNullVal; + } +}; + + +/// ConstantExpr - a constant value that is initialized with an expression using +/// other constant values. +/// +/// This class uses the standard Instruction opcodes to define the various +/// constant expressions. The Opcode field for the ConstantExpr class is +/// maintained in the Value::SubclassData field. +class ConstantExpr : public Constant { + friend struct ConstantCreator<ConstantExpr,Type, + std::pair<unsigned, std::vector<Constant*> > >; + friend struct ConvertConstantType<ConstantExpr, Type>; + +protected: + ConstantExpr(const Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps) + : Constant(ty, ConstantExprVal, Ops, NumOps) { + // Operation type (an Instruction opcode) is stored as the SubclassData. + SubclassData = Opcode; + } + + // These private methods are used by the type resolution code to create + // ConstantExprs in intermediate forms. 
+ static Constant *getTy(const Type *Ty, unsigned Opcode, + Constant *C1, Constant *C2); + static Constant *getCompareTy(unsigned short pred, Constant *C1, + Constant *C2); + static Constant *getSelectTy(const Type *Ty, + Constant *C1, Constant *C2, Constant *C3); + static Constant *getGetElementPtrTy(const Type *Ty, Constant *C, + Value* const *Idxs, unsigned NumIdxs); + static Constant *getExtractElementTy(const Type *Ty, Constant *Val, + Constant *Idx); + static Constant *getInsertElementTy(const Type *Ty, Constant *Val, + Constant *Elt, Constant *Idx); + static Constant *getShuffleVectorTy(const Type *Ty, Constant *V1, + Constant *V2, Constant *Mask); + static Constant *getExtractValueTy(const Type *Ty, Constant *Agg, + const unsigned *Idxs, unsigned NumIdxs); + static Constant *getInsertValueTy(const Type *Ty, Constant *Agg, + Constant *Val, + const unsigned *Idxs, unsigned NumIdxs); + +public: + // Static methods to construct a ConstantExpr of different kinds. Note that + // these methods may return a object that is not an instance of the + // ConstantExpr class, because they will attempt to fold the constant + // expression into something simpler if possible. + + /// Cast constant expr + /// + static Constant *getTrunc (Constant *C, const Type *Ty); + static Constant *getSExt (Constant *C, const Type *Ty); + static Constant *getZExt (Constant *C, const Type *Ty); + static Constant *getFPTrunc (Constant *C, const Type *Ty); + static Constant *getFPExtend(Constant *C, const Type *Ty); + static Constant *getUIToFP (Constant *C, const Type *Ty); + static Constant *getSIToFP (Constant *C, const Type *Ty); + static Constant *getFPToUI (Constant *C, const Type *Ty); + static Constant *getFPToSI (Constant *C, const Type *Ty); + static Constant *getPtrToInt(Constant *C, const Type *Ty); + static Constant *getIntToPtr(Constant *C, const Type *Ty); + static Constant *getBitCast (Constant *C, const Type *Ty); + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant); + + // @brief Convenience function for getting one of the casting operations + // using a CastOps opcode. 
+ static Constant *getCast( + unsigned ops, ///< The opcode for the conversion + Constant *C, ///< The constant to be converted + const Type *Ty ///< The type to which the constant is converted + ); + + // @brief Create a ZExt or BitCast cast constant expression + static Constant *getZExtOrBitCast( + Constant *C, ///< The constant to zext or bitcast + const Type *Ty ///< The type to zext or bitcast C to + ); + + // @brief Create a SExt or BitCast cast constant expression + static Constant *getSExtOrBitCast( + Constant *C, ///< The constant to sext or bitcast + const Type *Ty ///< The type to sext or bitcast C to + ); + + // @brief Create a Trunc or BitCast cast constant expression + static Constant *getTruncOrBitCast( + Constant *C, ///< The constant to trunc or bitcast + const Type *Ty ///< The type to trunc or bitcast C to + ); + + /// @brief Create a BitCast or a PtrToInt cast constant expression + static Constant *getPointerCast( + Constant *C, ///< The pointer value to be casted (operand 0) + const Type *Ty ///< The type to which cast should be made + ); + + /// @brief Create a ZExt, Bitcast or Trunc for integer -> integer casts + static Constant *getIntegerCast( + Constant *C, ///< The integer constant to be casted + const Type *Ty, ///< The integer type to cast to + bool isSigned ///< Whether C should be treated as signed or not + ); + + /// @brief Create a FPExt, Bitcast or FPTrunc for fp -> fp casts + static Constant *getFPCast( + Constant *C, ///< The integer constant to be casted + const Type *Ty ///< The integer type to cast to + ); + + /// @brief Return true if this is a convert constant expression + bool isCast() const; + + /// @brief Return true if this is a compare constant expression + bool isCompare() const; + + /// @brief Return true if this is an insertvalue or extractvalue expression, + /// and the getIndices() method may be used. + bool hasIndices() const; + + /// Select constant expr + /// + static Constant *getSelect(Constant *C, Constant *V1, Constant *V2) { + return getSelectTy(V1->getType(), C, V1, V2); + } + + /// getAlignOf constant expr - computes the alignment of a type in a target + /// independent way (Note: the return type is an i32; Note: assumes that i8 + /// is byte aligned). + /// + static Constant *getAlignOf(const Type *Ty); + + /// getSizeOf constant expr - computes the size of a type in a target + /// independent way (Note: the return type is an i64). + /// + static Constant *getSizeOf(const Type *Ty); + + /// ConstantExpr::get - Return a binary or shift operator constant expression, + /// folding if possible. + /// + static Constant *get(unsigned Opcode, Constant *C1, Constant *C2); + + /// @brief Return an ICmp, FCmp, VICmp, or VFCmp comparison operator constant + /// expression. + static Constant *getCompare(unsigned short pred, Constant *C1, Constant *C2); + + /// ConstantExpr::get* - Return some common constants without having to + /// specify the full Instruction::OPCODE identifier. 
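+  // A usage sketch of the helpers above (illustrative only; Type::Int8Ty and
+  // Type::Int64Ty are assumed built-in integer types). These factories fold
+  // whenever possible, so the result is not necessarily a ConstantExpr:
+  //
+  //   Constant *Size  = ConstantExpr::getSizeOf(Type::Int64Ty);           // i64
+  //   Constant *Small = ConstantExpr::getIntegerCast(Size, Type::Int8Ty,
+  //                                                  /*isSigned=*/false); // trunc
+  //   Constant *Sel   = ConstantExpr::getSelect(ConstantInt::getTrue(), Small,
+  //                                     Constant::getNullValue(Type::Int8Ty));
+  //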
+ /// + static Constant *getNeg(Constant *C); + static Constant *getNot(Constant *C); + static Constant *getAdd(Constant *C1, Constant *C2); + static Constant *getSub(Constant *C1, Constant *C2); + static Constant *getMul(Constant *C1, Constant *C2); + static Constant *getUDiv(Constant *C1, Constant *C2); + static Constant *getSDiv(Constant *C1, Constant *C2); + static Constant *getFDiv(Constant *C1, Constant *C2); + static Constant *getURem(Constant *C1, Constant *C2); // unsigned rem + static Constant *getSRem(Constant *C1, Constant *C2); // signed rem + static Constant *getFRem(Constant *C1, Constant *C2); + static Constant *getAnd(Constant *C1, Constant *C2); + static Constant *getOr(Constant *C1, Constant *C2); + static Constant *getXor(Constant *C1, Constant *C2); + static Constant *getICmp(unsigned short pred, Constant *LHS, Constant *RHS); + static Constant *getFCmp(unsigned short pred, Constant *LHS, Constant *RHS); + static Constant *getVICmp(unsigned short pred, Constant *LHS, Constant *RHS); + static Constant *getVFCmp(unsigned short pred, Constant *LHS, Constant *RHS); + static Constant *getShl(Constant *C1, Constant *C2); + static Constant *getLShr(Constant *C1, Constant *C2); + static Constant *getAShr(Constant *C1, Constant *C2); + + /// Getelementptr form. std::vector<Value*> is only accepted for convenience: + /// all elements must be Constant's. + /// + static Constant *getGetElementPtr(Constant *C, + Constant* const *IdxList, unsigned NumIdx); + static Constant *getGetElementPtr(Constant *C, + Value* const *IdxList, unsigned NumIdx); + + static Constant *getExtractElement(Constant *Vec, Constant *Idx); + static Constant *getInsertElement(Constant *Vec, Constant *Elt,Constant *Idx); + static Constant *getShuffleVector(Constant *V1, Constant *V2, Constant *Mask); + static Constant *getExtractValue(Constant *Agg, + const unsigned *IdxList, unsigned NumIdx); + static Constant *getInsertValue(Constant *Agg, Constant *Val, + const unsigned *IdxList, unsigned NumIdx); + + /// Floating point negation must be implemented with f(x) = -0.0 - x. This + /// method returns the negative zero constant for floating point or vector + /// floating point types; for all other types, it returns the null value. + static Constant *getZeroValueForNegationExpr(const Type *Ty); + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. + virtual bool isNullValue() const { return false; } + + /// getOpcode - Return the opcode at the root of this constant expression + unsigned getOpcode() const { return SubclassData; } + + /// getPredicate - Return the ICMP or FCMP predicate value. Assert if this is + /// not an ICMP or FCMP constant expression. + unsigned getPredicate() const; + + /// getIndices - Assert that this is an insertvalue or exactvalue + /// expression and return the list of indices. + const SmallVector<unsigned, 4> &getIndices() const; + + /// getOpcodeName - Return a string representation for an opcode. + const char *getOpcodeName() const; + + /// getWithOperandReplaced - Return a constant expression identical to this + /// one, but with the specified operand set to the specified value. + Constant *getWithOperandReplaced(unsigned OpNo, Constant *Op) const; + + /// getWithOperands - This returns the current constant expression with the + /// operands replaced with the specified values. The specified operands must + /// match count and type with the existing ones. 
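+  // A usage sketch for the binary operator factories above (illustrative
+  // only; Type::Int32Ty and the ICmpInst::ICMP_ULT predicate are assumed).
+  // Because these factories fold, combining two ConstantInts simply yields
+  // another ConstantInt rather than an expression node:
+  //
+  //   Constant *A   = ConstantInt::get(Type::Int32Ty, 40);
+  //   Constant *B   = ConstantInt::get(Type::Int32Ty, 2);
+  //   Constant *Sum = ConstantExpr::getAdd(A, B);                      // i32 42
+  //   Constant *LT  = ConstantExpr::getICmp(ICmpInst::ICMP_ULT, A, B); // i1 false
+  //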
+ Constant *getWithOperands(const std::vector<Constant*> &Ops) const { + return getWithOperands(&Ops[0], (unsigned)Ops.size()); + } + Constant *getWithOperands(Constant* const *Ops, unsigned NumOps) const; + + virtual void destroyConstant(); + virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ConstantExpr *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == ConstantExprVal; + } +}; + +template <> +struct OperandTraits<ConstantExpr> : VariadicOperandTraits<1> { +}; + +DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantExpr, Constant) + +//===----------------------------------------------------------------------===// +/// UndefValue - 'undef' values are things that do not have specified contents. +/// These are used for a variety of purposes, including global variable +/// initializers and operands to instructions. 'undef' values can occur with +/// any type. +/// +class UndefValue : public Constant { + friend struct ConstantCreator<UndefValue, Type, char>; + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + UndefValue(const UndefValue &); // DO NOT IMPLEMENT +protected: + explicit UndefValue(const Type *T) : Constant(T, UndefValueVal, 0, 0) {} +protected: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + /// get() - Static factory methods - Return an 'undef' object of the specified + /// type. + /// + static UndefValue *get(const Type *T); + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. + virtual bool isNullValue() const { return false; } + + virtual void destroyConstant(); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const UndefValue *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == UndefValueVal; + } +}; + +//===----------------------------------------------------------------------===// +/// MDString - a single uniqued string. +/// These are used to efficiently contain a byte sequence for metadata. +/// +class MDString : public Constant { + MDString(const MDString &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + MDString(const char *begin, const char *end); + + const char *StrBegin, *StrEnd; +protected: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + /// get() - Static factory methods - Return objects of the specified value. + /// + static MDString *get(const char *StrBegin, const char *StrEnd); + + /// size() - The length of this string. + /// + intptr_t size() const { return StrEnd - StrBegin; } + + /// begin() - Pointer to the first byte of the string. + /// + const char *begin() const { return StrBegin; } + + /// end() - Pointer to one byte past the end of the string. + /// + const char *end() const { return StrEnd; } + + /// getType() specialization - Type is always MetadataTy. + /// + inline const Type *getType() const { + return Type::MetadataTy; + } + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. This always returns false because getNullValue will never + /// produce metadata. 
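+  // A usage sketch (illustrative only; Type::Int32Ty is assumed, and strlen
+  // comes from <cstring>):
+  //
+  //   Constant *U = UndefValue::get(Type::Int32Ty);    // unspecified i32
+  //   const char *Txt = "some metadata";
+  //   MDString *MDS = MDString::get(Txt, Txt + strlen(Txt));
+  //   assert(MDS->size() == (intptr_t)strlen(Txt));
+  //   assert(MDS->getType() == Type::MetadataTy);
+  //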
+ virtual bool isNullValue() const { + return false; + } + + virtual void destroyConstant(); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MDString *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == MDStringVal; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Debugger/Debugger.h b/include/llvm/Debugger/Debugger.h new file mode 100644 index 0000000..5b0b97a --- /dev/null +++ b/include/llvm/Debugger/Debugger.h @@ -0,0 +1,175 @@ +//===- Debugger.h - LLVM debugger library interface -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the LLVM source-level debugger library interface. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGGER_DEBUGGER_H +#define LLVM_DEBUGGER_DEBUGGER_H + +#include <string> +#include <vector> + +namespace llvm { + class Module; + class InferiorProcess; + + /// Debugger class - This class implements the LLVM source-level debugger. + /// This allows clients to handle the user IO processing without having to + /// worry about how the debugger itself works. + /// + class Debugger { + // State the debugger needs when starting and stopping the program. + std::vector<std::string> ProgramArguments; + + // The environment to run the program with. This should eventually be + // changed to vector of strings when we allow the user to edit the + // environment. + const char * const *Environment; + + // Program - The currently loaded program, or null if none is loaded. + Module *Program; + + // Process - The currently executing inferior process. + InferiorProcess *Process; + + Debugger(const Debugger &); // DO NOT IMPLEMENT + void operator=(const Debugger &); // DO NOT IMPLEMENT + public: + Debugger(); + ~Debugger(); + + //===------------------------------------------------------------------===// + // Methods for manipulating and inspecting the execution environment. + // + + /// initializeEnvironment - Specify the environment the program should run + /// with. This is used to initialize the environment of the program to the + /// environment of the debugger. + void initializeEnvironment(const char *const *envp) { + Environment = envp; + } + + /// setWorkingDirectory - Specify the working directory for the program to + /// be started from. + void setWorkingDirectory(const std::string &Dir) { + // FIXME: implement + } + + template<typename It> + void setProgramArguments(It I, It E) { + ProgramArguments.assign(I, E); + } + unsigned getNumProgramArguments() const { + return static_cast<unsigned>(ProgramArguments.size()); + } + const std::string &getProgramArgument(unsigned i) const { + return ProgramArguments[i]; + } + + + //===------------------------------------------------------------------===// + // Methods for manipulating and inspecting the program currently loaded. + // + + /// isProgramLoaded - Return true if there is a program currently loaded. + /// + bool isProgramLoaded() const { return Program != 0; } + + /// getProgram - Return the LLVM module corresponding to the program. + /// + Module *getProgram() const { return Program; } + + /// getProgramPath - Get the path of the currently loaded program, or an + /// empty string if none is loaded. 
+ std::string getProgramPath() const; + + /// loadProgram - If a program is currently loaded, unload it. Then search + /// the PATH for the specified program, loading it when found. If the + /// specified program cannot be found, an exception is thrown to indicate + /// the error. + void loadProgram(const std::string &Path); + + /// unloadProgram - If a program is running, kill it, then unload all traces + /// of the current program. If no program is loaded, this method silently + /// succeeds. + void unloadProgram(); + + //===------------------------------------------------------------------===// + // Methods for manipulating and inspecting the program currently running. + // + // If the program is running, and the debugger is active, then we know that + // the program has stopped. This being the case, we can inspect the + // program, ask it for its source location, set breakpoints, etc. + // + + /// isProgramRunning - Return true if a program is loaded and has a + /// currently active instance. + bool isProgramRunning() const { return Process != 0; } + + /// getRunningProcess - If there is no program running, throw an exception. + /// Otherwise return the running process so that it can be inspected by the + /// debugger. + const InferiorProcess &getRunningProcess() const { + if (Process == 0) throw "No process running."; + return *Process; + } + + /// createProgram - Create an instance of the currently loaded program, + /// killing off any existing one. This creates the program and stops it at + /// the first possible moment. If there is no program loaded or if there is + /// a problem starting the program, this method throws an exception. + void createProgram(); + + /// killProgram - If the program is currently executing, kill off the + /// process and free up any state related to the currently running program. + /// If there is no program currently running, this just silently succeeds. + /// If something horrible happens when killing the program, an exception + /// gets thrown. + void killProgram(); + + + //===------------------------------------------------------------------===// + // Methods for continuing execution. These methods continue the execution + // of the program by some amount. If the program is successfully stopped, + // execution returns, otherwise an exception is thrown. + // + // NOTE: These methods should always be used in preference to directly + // accessing the Dbg object, because these will delete the Process object if + // the process unexpectedly dies. + // + + /// stepProgram - Implement the 'step' command, continuing execution until + /// the next possible stop point. + void stepProgram(); + + /// nextProgram - Implement the 'next' command, continuing execution until + /// the next possible stop point that is in the current function. + void nextProgram(); + + /// finishProgram - Implement the 'finish' command, continuing execution + /// until the specified frame ID returns. + void finishProgram(void *Frame); + + /// contProgram - Implement the 'cont' command, continuing execution until + /// the next breakpoint is encountered. 
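+    // A driver sketch for the methods above (illustrative only; "a.out" is a
+    // placeholder path and envp comes from main()). The execution methods
+    // throw on error, so a real client would wrap them in try/catch:
+    //
+    //   Debugger Dbg;
+    //   Dbg.initializeEnvironment(envp);
+    //   Dbg.loadProgram("a.out");      // throws if the program cannot be found
+    //   Dbg.createProgram();           // start the inferior, stopped at entry
+    //   while (Dbg.isProgramRunning())
+    //     Dbg.stepProgram();           // single-step until the program exits
+    //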
+ void contProgram(); + }; + + class NonErrorException { + std::string Message; + public: + NonErrorException(const std::string &M) : Message(M) {} + const std::string &getMessage() const { return Message; } + }; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Debugger/InferiorProcess.h b/include/llvm/Debugger/InferiorProcess.h new file mode 100644 index 0000000..71d138b --- /dev/null +++ b/include/llvm/Debugger/InferiorProcess.h @@ -0,0 +1,137 @@ +//===- InferiorProcess.h - Represent the program being debugged -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the InferiorProcess class, which is used to represent, +// inspect, and manipulate a process under the control of the LLVM debugger. +// +// This is an abstract class which should allow various different types of +// implementations. Initially we implement a unix specific debugger backend +// that does not require code generator support, but we could eventually use +// code generator support with ptrace, support windows based targets, supported +// remote targets, etc. +// +// If the inferior process unexpectedly dies, an attempt to communicate with it +// will cause an InferiorProcessDead exception to be thrown, indicating the exit +// code of the process. When this occurs, no methods on the InferiorProcess +// class should be called except for the destructor. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGGER_INFERIORPROCESS_H +#define LLVM_DEBUGGER_INFERIORPROCESS_H + +#include <string> +#include <vector> + +namespace llvm { + class Module; + class GlobalVariable; + + /// InferiorProcessDead exception - This class is thrown by methods that + /// communicate with the interior process if the process unexpectedly exits or + /// dies. The instance variable indicates what the exit code of the process + /// was, or -1 if unknown. + class InferiorProcessDead { + int ExitCode; + public: + InferiorProcessDead(int EC) : ExitCode(EC) {} + int getExitCode() const { return ExitCode; } + }; + + /// InferiorProcess class - This class represents the process being debugged + /// by the debugger. Objects of this class should not be stack allocated, + /// because the destructor can throw exceptions. + /// + class InferiorProcess { + Module *M; + protected: + InferiorProcess(Module *m) : M(m) {} + public: + /// create - Create an inferior process of the specified module, and + /// stop it at the first opportunity. If there is a problem starting the + /// program (for example, it has no main), throw an exception. + static InferiorProcess *create(Module *M, + const std::vector<std::string> &Arguments, + const char * const *envp); + + // InferiorProcess destructor - Kill the current process. If something + // terrible happens, we throw an exception from the destructor. + virtual ~InferiorProcess() {} + + //===------------------------------------------------------------------===// + // Status methods - These methods return information about the currently + // stopped process. + // + + /// getStatus - Return a status message that is specific to the current type + /// of inferior process that is created. This can return things like the + /// PID of the inferior or other potentially interesting things. 
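+    // A usage sketch (illustrative only; M, Args and envp are assumed to be a
+    // loaded Module, an argument vector, and an environment block). Any
+    // communication with the inferior may throw InferiorProcessDead:
+    //
+    //   InferiorProcess *IP = InferiorProcess::create(M, Args, envp);
+    //   try {
+    //     IP->stepProgram();
+    //   } catch (InferiorProcessDead &D) {
+    //     // the process exited; D.getExitCode() is its exit status (or -1)
+    //   }
+    //   delete IP;   // the destructor kills the process if it is still alive
+    //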
+ virtual std::string getStatus() const { + return ""; + } + + //===------------------------------------------------------------------===// + // Methods for inspecting the call stack. + // + + /// getPreviousFrame - Given the descriptor for the current stack frame, + /// return the descriptor for the caller frame. This returns null when it + /// runs out of frames. If Frame is null, the initial frame should be + /// returned. + virtual void *getPreviousFrame(void *Frame) const = 0; + + /// getSubprogramDesc - Return the subprogram descriptor for the current + /// stack frame. + virtual const GlobalVariable *getSubprogramDesc(void *Frame) const = 0; + + /// getFrameLocation - This method returns the source location where each + /// stack frame is stopped. + virtual void getFrameLocation(void *Frame, unsigned &LineNo, + unsigned &ColNo, + const GlobalVariable *&SourceDesc) const = 0; + + //===------------------------------------------------------------------===// + // Methods for manipulating breakpoints. + // + + /// addBreakpoint - This method adds a breakpoint at the specified line, + /// column, and source file, and returns a unique identifier for it. + /// + /// It is up to the debugger to determine whether or not there is actually a + /// stop-point that corresponds with the specified location. + virtual unsigned addBreakpoint(unsigned LineNo, unsigned ColNo, + const GlobalVariable *SourceDesc) = 0; + + /// removeBreakpoint - This deletes the breakpoint with the specified ID + /// number. + virtual void removeBreakpoint(unsigned ID) = 0; + + + //===------------------------------------------------------------------===// + // Execution methods - These methods cause the program to continue execution + // by some amount. If the program successfully stops, this returns. + // Otherwise, if the program unexpectedly terminates, an InferiorProcessDead + // exception is thrown. + // + + /// stepProgram - Implement the 'step' command, continuing execution until + /// the next possible stop point. + virtual void stepProgram() = 0; + + /// finishProgram - Implement the 'finish' command, continuing execution + /// until the current function returns. + virtual void finishProgram(void *Frame) = 0; + + /// contProgram - Implement the 'cont' command, continuing execution until + /// a breakpoint is encountered. + virtual void contProgram() = 0; + }; +} // end namespace llvm + +#endif diff --git a/include/llvm/Debugger/ProgramInfo.h b/include/llvm/Debugger/ProgramInfo.h new file mode 100644 index 0000000..5c07c86 --- /dev/null +++ b/include/llvm/Debugger/ProgramInfo.h @@ -0,0 +1,246 @@ +//===- ProgramInfo.h - Information about the loaded program -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various pieces of information about the currently loaded +// program. One instance of this object is created every time a program is +// loaded, and destroyed every time it is unloaded. +// +// The various pieces of information gathered about the source program are all +// designed to be extended by various SourceLanguage implementations. This +// allows source languages to keep any extended information that they support in +// the derived class portions of the class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGGER_PROGRAMINFO_H +#define LLVM_DEBUGGER_PROGRAMINFO_H + +#include "llvm/System/TimeValue.h" +#include <string> +#include <map> +#include <vector> + +namespace llvm { + class GlobalVariable; + class Module; + class SourceFile; + class SourceLanguage; + class ProgramInfo; + + /// SourceLanguageCache - SourceLanguage implementations are allowed to cache + /// stuff in the ProgramInfo object. The only requirement we have on these + /// instances is that they are destroyable. + struct SourceLanguageCache { + virtual ~SourceLanguageCache() {} + }; + + /// SourceFileInfo - One instance of this structure is created for each + /// source file in the program. + /// + class SourceFileInfo { + /// BaseName - The filename of the source file. + std::string BaseName; + + /// Directory - The working directory of this source file when it was + /// compiled. + std::string Directory; + + /// Version - The version of the LLVM debug information that this file was + /// compiled with. + unsigned Version; + + /// Language - The source language that the file was compiled with. This + /// pointer is never null. + /// + const SourceLanguage *Language; + + /// Descriptor - The LLVM Global Variable which describes the source file. + /// + const GlobalVariable *Descriptor; + + /// SourceText - The body of this source file, or null if it has not yet + /// been loaded. + mutable SourceFile *SourceText; + public: + SourceFileInfo(const GlobalVariable *Desc, const SourceLanguage &Lang); + ~SourceFileInfo(); + + const std::string &getBaseName() const { return BaseName; } + const std::string &getDirectory() const { return Directory; } + unsigned getDebugVersion() const { return Version; } + const GlobalVariable *getDescriptor() const { return Descriptor; } + SourceFile &getSourceText() const; + + const SourceLanguage &getLanguage() const { return *Language; } + }; + + + /// SourceFunctionInfo - An instance of this class is used to represent each + /// source function in the program. + /// + class SourceFunctionInfo { + /// Name - This contains an abstract name that is potentially useful to the + /// end-user. If there is no explicit support for the current language, + /// then this string is used to identify the function. + std::string Name; + + /// Descriptor - The descriptor for this function. + /// + const GlobalVariable *Descriptor; + + /// SourceFile - The file that this function is defined in. + /// + const SourceFileInfo *SourceFile; + + /// LineNo, ColNo - The location of the first stop-point in the function. + /// These are computed on demand. + mutable unsigned LineNo, ColNo; + + public: + SourceFunctionInfo(ProgramInfo &PI, const GlobalVariable *Desc); + virtual ~SourceFunctionInfo() {} + + /// getSymbolicName - Return a human-readable symbolic name to identify the + /// function (for example, in stack traces). + virtual std::string getSymbolicName() const { return Name; } + + /// getDescriptor - This returns the descriptor for the function. + /// + const GlobalVariable *getDescriptor() const { return Descriptor; } + + /// getSourceFile - This returns the source file that defines the function. + /// + const SourceFileInfo &getSourceFile() const { return *SourceFile; } + + /// getSourceLocation - This method returns the location of the first + /// stopping point in the function. If the body of the function cannot be + /// found, this returns zeros for both values. 
+    void getSourceLocation(unsigned &LineNo, unsigned &ColNo) const;
+  };
+
+
+  /// ProgramInfo - This object contains information about the loaded program.
+  /// When a new program is loaded, an instance of this class is created. When
+  /// the program is unloaded, the instance is destroyed. This object basically
+  /// manages the lazy computation of information useful for the debugger.
+  class ProgramInfo {
+    Module *M;
+
+    /// ProgramTimeStamp - This is the timestamp of the executable file that we
+    /// currently have loaded into the debugger.
+    sys::TimeValue ProgramTimeStamp;
+
+    /// SourceFiles - This map is used to transform source file descriptors into
+    /// their corresponding SourceFileInfo objects. This mapping owns the
+    /// memory for the SourceFileInfo objects.
+    ///
+    bool SourceFilesIsComplete;
+    std::map<const GlobalVariable*, SourceFileInfo*> SourceFiles;
+
+    /// SourceFileIndex - Mapping from source file basenames to the information
+    /// about the file. Note that there can be filename collisions, so this is
+    /// a multimap. This map is populated incrementally as the user interacts
+    /// with the program, through the getSourceFileFromDesc method. If ALL of
+    /// the source files are needed, the getSourceFiles() method scans the
+    /// entire program looking for them.
+    ///
+    std::multimap<std::string, SourceFileInfo*> SourceFileIndex;
+
+    /// SourceFunctions - This map contains entries for the functions in the
+    /// source program. If SourceFunctionsIsComplete is true, then ALL of the
+    /// functions in the program are in this map.
+    bool SourceFunctionsIsComplete;
+    std::map<const GlobalVariable*, SourceFunctionInfo*> SourceFunctions;
+
+    /// LanguageCaches - Each source language is permitted to keep a per-program
+    /// cache of information specific to whatever it needs. This vector is
+    /// effectively a small map from the languages that are active in the
+    /// program to their caches. This can be accessed by the language through
+    /// the "getLanguageCache" method.
+    std::vector<std::pair<const SourceLanguage*,
+                          SourceLanguageCache*> > LanguageCaches;
+  public:
+    ProgramInfo(Module *m);
+    ~ProgramInfo();
+
+    /// getProgramTimeStamp - Return the time-stamp of the program when it was
+    /// loaded.
+    sys::TimeValue getProgramTimeStamp() const { return ProgramTimeStamp; }
+
+    //===------------------------------------------------------------------===//
+    // Interfaces to the source code files that make up the program.
+    //
+
+    /// getSourceFile - Return source file information for the specified source
+    /// file descriptor object, adding it to the collection as needed. This
+    /// method always succeeds (is unambiguous), and is always efficient.
+    ///
+    const SourceFileInfo &getSourceFile(const GlobalVariable *Desc);
+
+    /// getSourceFile - Look up the file with the specified name. If there is
+    /// more than one match for the specified filename, prompt the user to pick
+    /// one. If there is no source file that matches the specified name, throw
+    /// an exception indicating that we can't find the file. Otherwise, return
+    /// the file information for that file.
+    ///
+    /// If the source file hasn't been discovered yet in the program, this
+    /// method might have to index the whole program by calling the
+    /// getSourceFiles() method.
+    ///
+    const SourceFileInfo &getSourceFile(const std::string &Filename);
+
+    /// getSourceFiles - Index all of the source files in the program and return
+    /// them. This information is lazily computed the first time that it is
+    /// requested.
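+    // A usage sketch for the lookups above (illustrative only; FileDesc is
+    // assumed to be the debug-info descriptor GlobalVariable for a source
+    // file, M the currently loaded Module, and "main.c" a placeholder name):
+    //
+    //   ProgramInfo PI(M);
+    //   const SourceFileInfo &SFI = PI.getSourceFile(FileDesc);
+    //   std::string Path = SFI.getDirectory() + "/" + SFI.getBaseName();
+    //   const SourceFileInfo &Main = PI.getSourceFile("main.c"); // may prompt or throw
+    //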
Since this information can take a long time to compute, the + /// user is given a chance to cancel it. If this occurs, an exception is + /// thrown. + const std::map<const GlobalVariable*, SourceFileInfo*> & + getSourceFiles(bool RequiresCompleteMap = true); + + //===------------------------------------------------------------------===// + // Interfaces to the functions that make up the program. + // + + /// getFunction - Return source function information for the specified + /// function descriptor object, adding it to the collection as needed. This + /// method always succeeds (is unambiguous), and is always efficient. + /// + const SourceFunctionInfo &getFunction(const GlobalVariable *Desc); + + /// getSourceFunctions - Index all of the functions in the program and + /// return them. This information is lazily computed the first time that it + /// is requested. Since this information can take a long time to compute, + /// the user is given a chance to cancel it. If this occurs, an exception + /// is thrown. + const std::map<const GlobalVariable*, SourceFunctionInfo*> & + getSourceFunctions(bool RequiresCompleteMap = true); + + /// addSourceFunctionsRead - Return true if the source functions map is + /// complete: that is, all functions in the program have been read in. + bool allSourceFunctionsRead() const { return SourceFunctionsIsComplete; } + + /// getLanguageCache - This method is used to build per-program caches of + /// information, such as the functions or types visible to the program. + /// This can be used by SourceLanguage implementations because it requires + /// an accessible [sl]::CacheType typedef, where [sl] is the C++ type of the + /// source-language subclass. + template<typename SL> + typename SL::CacheType &getLanguageCache(const SL *L) { + for (unsigned i = 0, e = LanguageCaches.size(); i != e; ++i) + if (LanguageCaches[i].first == L) + return *(typename SL::CacheType*)LanguageCaches[i].second; + typename SL::CacheType *NewCache = L->createSourceLanguageCache(*this); + LanguageCaches.push_back(std::make_pair(L, NewCache)); + return *NewCache; + } + }; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Debugger/RuntimeInfo.h b/include/llvm/Debugger/RuntimeInfo.h new file mode 100644 index 0000000..c537651 --- /dev/null +++ b/include/llvm/Debugger/RuntimeInfo.h @@ -0,0 +1,142 @@ +//===- RuntimeInfo.h - Information about running program --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines classes that capture various pieces of information about +// the currently executing, but stopped, program. One instance of this object +// is created every time a program is stopped, and destroyed every time it +// starts running again. This object's main goal is to make access to runtime +// information easy and efficient, by caching information as requested. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGGER_RUNTIMEINFO_H +#define LLVM_DEBUGGER_RUNTIMEINFO_H + +#include <vector> +#include <cassert> + +namespace llvm { + class ProgramInfo; + class RuntimeInfo; + class InferiorProcess; + class GlobalVariable; + class SourceFileInfo; + + /// StackFrame - One instance of this structure is created for each stack + /// frame that is active in the program. 
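Taken together, the ProgramInfo accessors above form the debugger's lazily built index of the program. A minimal sketch of how a front end might drive it follows; the Module pointer and the "main.c" name are hypothetical stand-ins, not values defined by this header, and getSourceFile may throw if the name cannot be resolved.

#include "llvm/Debugger/ProgramInfo.h"
using namespace llvm;

void indexProgram(Module *M) {
  ProgramInfo PI(M);                       // information is computed lazily

  // Look up one file by basename; this may index the whole program, and may
  // throw if no source file with that name is found.
  const SourceFileInfo &SFI = PI.getSourceFile("main.c");
  (void)SFI.getLanguage();

  // Force a complete index of every function descriptor in the program.
  const std::map<const GlobalVariable*, SourceFunctionInfo*> &Fns =
      PI.getSourceFunctions(/*RequiresCompleteMap=*/true);
  (void)Fns;
}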
+ /// + class StackFrame { + RuntimeInfo &RI; + void *FrameID; + const GlobalVariable *FunctionDesc; + + /// LineNo, ColNo, FileInfo - This information indicates WHERE in the source + /// code for the program the stack frame is located. + unsigned LineNo, ColNo; + const SourceFileInfo *SourceInfo; + public: + StackFrame(RuntimeInfo &RI, void *ParentFrameID); + + StackFrame &operator=(const StackFrame &RHS) { + FrameID = RHS.FrameID; + FunctionDesc = RHS.FunctionDesc; + return *this; + } + + /// getFrameID - return the low-level opaque frame ID of this stack frame. + /// + void *getFrameID() const { return FrameID; } + + /// getFunctionDesc - Return the descriptor for the function that contains + /// this stack frame, or null if it is unknown. + /// + const GlobalVariable *getFunctionDesc(); + + /// getSourceLocation - Return the source location that this stack frame is + /// sitting at. + void getSourceLocation(unsigned &LineNo, unsigned &ColNo, + const SourceFileInfo *&SourceInfo); + }; + + + /// RuntimeInfo - This class collects information about the currently running + /// process. It is created whenever the program stops execution for the + /// debugger, and destroyed whenver execution continues. + class RuntimeInfo { + /// ProgInfo - This object contains static information about the program. + /// + ProgramInfo *ProgInfo; + + /// IP - This object contains information about the actual inferior process + /// that we are communicating with and aggregating information from. + const InferiorProcess &IP; + + /// CallStack - This caches information about the current stack trace of the + /// program. This is lazily computed as needed. + std::vector<StackFrame> CallStack; + + /// CurrentFrame - The user can traverse the stack frame with the + /// up/down/frame family of commands. This index indicates the current + /// stack frame. + unsigned CurrentFrame; + + public: + RuntimeInfo(ProgramInfo *PI, const InferiorProcess &ip) + : ProgInfo(PI), IP(ip), CurrentFrame(0) { + // Make sure that the top of stack has been materialized. If this throws + // an exception, something is seriously wrong and the RuntimeInfo object + // would be unusable anyway. + getStackFrame(0); + } + + ProgramInfo &getProgramInfo() { return *ProgInfo; } + const InferiorProcess &getInferiorProcess() const { return IP; } + + //===------------------------------------------------------------------===// + // Methods for inspecting the call stack of the program. + // + + /// getStackFrame - Materialize the specified stack frame and return it. If + /// the specified ID is off of the bottom of the stack, throw an exception + /// indicating the problem. + StackFrame &getStackFrame(unsigned ID) { + if (ID >= CallStack.size()) + materializeFrame(ID); + return CallStack[ID]; + } + + /// getCurrentFrame - Return the current stack frame object that the user is + /// inspecting. + StackFrame &getCurrentFrame() { + assert(CallStack.size() > CurrentFrame && + "Must have materialized frame before making it current!"); + return CallStack[CurrentFrame]; + } + + /// getCurrentFrameIdx - Return the current frame the user is inspecting. + /// + unsigned getCurrentFrameIdx() const { return CurrentFrame; } + + /// setCurrentFrameIdx - Set the current frame index to the specified value. + /// Note that the specified frame must have been materialized with + /// getStackFrame before it can be made current. 
+ void setCurrentFrameIdx(unsigned Idx) { + assert(Idx < CallStack.size() && + "Must materialize frame before making it current!"); + CurrentFrame = Idx; + } + private: + /// materializeFrame - Create and process all frames up to and including the + /// specified frame number. This throws an exception if the specified frame + /// ID is nonexistant. + void materializeFrame(unsigned ID); + }; +} + +#endif diff --git a/include/llvm/Debugger/SourceFile.h b/include/llvm/Debugger/SourceFile.h new file mode 100644 index 0000000..249435a --- /dev/null +++ b/include/llvm/Debugger/SourceFile.h @@ -0,0 +1,87 @@ +//===- SourceFile.h - Class to represent a source code file -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SourceFile class which is used to represent a single +// file of source code in the program, caching data from the file to make access +// efficient. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGGER_SOURCEFILE_H +#define LLVM_DEBUGGER_SOURCEFILE_H + +#include "llvm/System/Path.h" +#include "llvm/ADT/OwningPtr.h" +#include <vector> + +namespace llvm { + class GlobalVariable; + class MemoryBuffer; + + class SourceFile { + /// Filename - This is the full path of the file that is loaded. + /// + sys::Path Filename; + + /// Descriptor - The debugging descriptor for this source file. If there + /// are multiple descriptors for the same file, this is just the first one + /// encountered. + /// + const GlobalVariable *Descriptor; + + /// This is the memory mapping for the file so we can gain access to it. + OwningPtr<MemoryBuffer> File; + + /// LineOffset - This vector contains a mapping from source line numbers to + /// their offsets in the file. This data is computed lazily, the first time + /// it is asked for. If there are zero elements allocated in this vector, + /// then it has not yet been computed. + mutable std::vector<unsigned> LineOffset; + + public: + /// SourceFile constructor - Read in the specified source file if it exists, + /// but do not build the LineOffsets table until it is requested. This will + /// NOT throw an exception if the file is not found, if there is an error + /// reading it, or if the user cancels the operation. Instead, it will just + /// be an empty source file. + SourceFile(const std::string &fn, const GlobalVariable *Desc); + + ~SourceFile(); + + /// getDescriptor - Return the debugging decriptor for this source file. + /// + const GlobalVariable *getDescriptor() const { return Descriptor; } + + /// getFilename - Return the fully resolved path that this file was loaded + /// from. + const std::string &getFilename() const { return Filename.toString(); } + + /// getSourceLine - Given a line number, return the start and end of the + /// line in the file. If the line number is invalid, or if the file could + /// not be loaded, null pointers are returned for the start and end of the + /// file. Note that line numbers start with 0, not 1. This also strips off + /// any newlines from the end of the line, to ease formatting of the text. + void getSourceLine(unsigned LineNo, const char *&LineStart, + const char *&LineEnd) const; + + /// getNumLines - Return the number of lines the source file contains. 
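As a usage sketch of the SourceFile interface above (the path and descriptor arguments are placeholders; note that line numbers are zero-based):

#include "llvm/Debugger/SourceFile.h"
#include <cstdio>
using namespace llvm;

void printLine(const GlobalVariable *Desc, unsigned LineNo) {
  SourceFile SF("/tmp/example.c", Desc);    // hypothetical path
  if (LineNo >= SF.getNumLines())
    return;                                 // out of range, or file not loaded
  const char *Start, *End;
  SF.getSourceLine(LineNo, Start, End);
  if (Start && End)
    std::printf("%u: %.*s\n", LineNo + 1, int(End - Start), Start);
}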
+ /// + unsigned getNumLines() const { + if (LineOffset.empty()) calculateLineOffsets(); + return static_cast<unsigned>(LineOffset.size()); + } + + private: + /// calculateLineOffsets - Compute the LineOffset vector for the current + /// file. + void calculateLineOffsets() const; + }; +} // end namespace llvm + +#endif diff --git a/include/llvm/Debugger/SourceLanguage.h b/include/llvm/Debugger/SourceLanguage.h new file mode 100644 index 0000000..a07dd97 --- /dev/null +++ b/include/llvm/Debugger/SourceLanguage.h @@ -0,0 +1,99 @@ +//===- SourceLanguage.h - Interact with source languages --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the abstract SourceLanguage interface, which is used by the +// LLVM debugger to parse source-language expressions and render program objects +// into a human readable string. In general, these classes perform all of the +// analysis and interpretation of the language-specific debugger information. +// +// This interface is designed to be completely stateless, so all methods are +// const. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGGER_SOURCELANGUAGE_H +#define LLVM_DEBUGGER_SOURCELANGUAGE_H + +#include <string> + +namespace llvm { + class GlobalVariable; + class SourceFileInfo; + class SourceFunctionInfo; + class ProgramInfo; + class RuntimeInfo; + + struct SourceLanguage { + virtual ~SourceLanguage() {} + + /// getSourceLanguageName - This method is used to implement the 'show + /// language' command in the debugger. + virtual const char *getSourceLanguageName() const = 0; + + //===------------------------------------------------------------------===// + // Methods used to implement debugger hooks. + // + + /// printInfo - Implementing this method allows the debugger to use + /// language-specific 'info' extensions, e.g., 'info selectors' for objc. + /// This method should return true if the specified string is recognized. + /// + virtual bool printInfo(const std::string &What) const { + return false; + } + + /// lookupFunction - Given a textual function name, return the + /// SourceFunctionInfo descriptor for that function, or null if it cannot be + /// found. If the program is currently running, the RuntimeInfo object + /// provides information about the current evaluation context, otherwise it + /// will be null. + /// + virtual SourceFunctionInfo *lookupFunction(const std::string &FunctionName, + ProgramInfo &PI, + RuntimeInfo *RI = 0) const { + return 0; + } + + + //===------------------------------------------------------------------===// + // Methods used to parse various pieces of program information. + // + + /// createSourceFileInfo - This method can be implemented by the front-end + /// if it needs to keep track of information beyond what the debugger + /// requires. + virtual SourceFileInfo * + createSourceFileInfo(const GlobalVariable *Desc, ProgramInfo &PI) const; + + /// createSourceFunctionInfo - This method can be implemented by the derived + /// SourceLanguage if it needs to keep track of more information than the + /// SourceFunctionInfo has. 
+ virtual SourceFunctionInfo * + createSourceFunctionInfo(const GlobalVariable *Desc, ProgramInfo &PI) const; + + + //===------------------------------------------------------------------===// + // Static methods used to get instances of various source languages. + // + + /// get - This method returns a source-language instance for the specified + /// Dwarf 3 language identifier. If the language is unknown, an object is + /// returned that can support some minimal operations, but is not terribly + /// bright. + static const SourceLanguage &get(unsigned ID); + + /// get*Instance() - These methods return specific instances of languages. + /// + static const SourceLanguage &getCFamilyInstance(); + static const SourceLanguage &getCPlusPlusInstance(); + static const SourceLanguage &getUnknownLanguageInstance(); + }; +} + +#endif diff --git a/include/llvm/DerivedTypes.h b/include/llvm/DerivedTypes.h new file mode 100644 index 0000000..dd6d908 --- /dev/null +++ b/include/llvm/DerivedTypes.h @@ -0,0 +1,471 @@ +//===-- llvm/DerivedTypes.h - Classes for handling data types ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of classes that represent "derived +// types". These are things like "arrays of x" or "structure of x, y, z" or +// "method returning x taking (y,z) as parameters", etc... +// +// The implementations of these classes live in the Type.cpp file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DERIVED_TYPES_H +#define LLVM_DERIVED_TYPES_H + +#include "llvm/Type.h" + +namespace llvm { + +class Value; +template<class ValType, class TypeClass> class TypeMap; +class FunctionValType; +class ArrayValType; +class StructValType; +class PointerValType; +class VectorValType; +class IntegerValType; +class APInt; + +class DerivedType : public Type { + friend class Type; + +protected: + explicit DerivedType(TypeID id) : Type(id) {} + + /// notifyUsesThatTypeBecameConcrete - Notify AbstractTypeUsers of this type + /// that the current type has transitioned from being abstract to being + /// concrete. + /// + void notifyUsesThatTypeBecameConcrete(); + + /// dropAllTypeUses - When this (abstract) type is resolved to be equal to + /// another (more concrete) type, we must eliminate all references to other + /// types, to avoid some circular reference problems. + /// + void dropAllTypeUses(); + +public: + + //===--------------------------------------------------------------------===// + // Abstract Type handling methods - These types have special lifetimes, which + // are managed by (add|remove)AbstractTypeUser. See comments in + // AbstractTypeUser.h for more information. + + /// refineAbstractTypeTo - This function is used to when it is discovered that + /// the 'this' abstract type is actually equivalent to the NewType specified. + /// This causes all users of 'this' to switch to reference the more concrete + /// type NewType and for 'this' to be deleted. 
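A small sketch of how the SourceLanguage entry points above might be driven; the Dwarf language ID, the 'info' topic, and the function name are arbitrary examples, not values this header defines.

#include "llvm/Debugger/SourceLanguage.h"
#include "llvm/Debugger/ProgramInfo.h"
#include <cstdio>
using namespace llvm;

void showLanguage(unsigned DwarfLangID, ProgramInfo &PI) {
  // Unknown IDs fall back to the minimal "unknown language" instance.
  const SourceLanguage &SL = SourceLanguage::get(DwarfLangID);
  std::printf("language: %s\n", SL.getSourceLanguageName());

  // Language-specific 'info' extension, e.g. "info selectors" for objc.
  if (!SL.printInfo("selectors"))
    std::printf("no language-specific info for that topic\n");

  // Resolve a function by name through the language plugin.
  if (SourceFunctionInfo *SFI = SL.lookupFunction("main", PI))
    std::printf("found %s\n", SFI->getSymbolicName().c_str());
}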
+ /// + void refineAbstractTypeTo(const Type *NewType); + + void dump() const { Type::dump(); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DerivedType *) { return true; } + static inline bool classof(const Type *T) { + return T->isDerivedType(); + } +}; + +/// Class to represent integer types. Note that this class is also used to +/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and +/// Int64Ty. +/// @brief Integer representation type +class IntegerType : public DerivedType { +protected: + explicit IntegerType(unsigned NumBits) : DerivedType(IntegerTyID) { + setSubclassData(NumBits); + } + friend class TypeMap<IntegerValType, IntegerType>; +public: + /// This enum is just used to hold constants we need for IntegerType. + enum { + MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified + MAX_INT_BITS = (1<<23)-1 ///< Maximum number of bits that can be specified + ///< Note that bit width is stored in the Type classes SubclassData field + ///< which has 23 bits. This yields a maximum bit width of 8,388,607 bits. + }; + + /// This static method is the primary way of constructing an IntegerType. + /// If an IntegerType with the same NumBits value was previously instantiated, + /// that instance will be returned. Otherwise a new one will be created. Only + /// one instance with a given NumBits value is ever created. + /// @brief Get or create an IntegerType instance. + static const IntegerType* get(unsigned NumBits); + + /// @brief Get the number of bits in this IntegerType + unsigned getBitWidth() const { return getSubclassData(); } + + /// getBitMask - Return a bitmask with ones set for all of the bits + /// that can be set by an unsigned version of this type. This is 0xFF for + /// i8, 0xFFFF for i16, etc. + uint64_t getBitMask() const { + return ~uint64_t(0UL) >> (64-getBitWidth()); + } + + /// getSignBit - Return a uint64_t with just the most significant bit set (the + /// sign bit, if the value is treated as a signed number). + uint64_t getSignBit() const { + return 1ULL << (getBitWidth()-1); + } + + /// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc. + /// @returns a bit mask with ones set for all the bits of this type. + /// @brief Get a bit mask for this type. + APInt getMask() const; + + /// This method determines if the width of this IntegerType is a power-of-2 + /// in terms of 8 bit bytes. + /// @returns true if this is a power-of-2 byte width. + /// @brief Is this a power-of-2 byte-width IntegerType ? + bool isPowerOf2ByteWidth() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const IntegerType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == IntegerTyID; + } +}; + + +/// FunctionType - Class to represent function types +/// +class FunctionType : public DerivedType { + friend class TypeMap<FunctionValType, FunctionType>; + bool isVarArgs; + + FunctionType(const FunctionType &); // Do not implement + const FunctionType &operator=(const FunctionType &); // Do not implement + FunctionType(const Type *Result, const std::vector<const Type*> &Params, + bool IsVarArgs); + +public: + /// FunctionType::get - This static method is the primary way of constructing + /// a FunctionType. 
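A quick sketch exercising the IntegerType accessors described above (the bit widths are arbitrary examples):

#include "llvm/DerivedTypes.h"
#include <cassert>
using namespace llvm;

void integerTypeExamples() {
  const IntegerType *I8  = IntegerType::get(8);
  const IntegerType *I16 = IntegerType::get(16);

  assert(I8->getBitWidth() == 8);
  assert(I8->getBitMask() == 0xFFULL);         // low 8 bits set
  assert(I16->getSignBit() == 0x8000ULL);      // only the MSB set
  assert(I8->isPowerOf2ByteWidth());           // 1 byte
  assert(IntegerType::get(8) == I8);           // instances are uniqued by width
}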
+ /// + static FunctionType *get( + const Type *Result, ///< The result type + const std::vector<const Type*> &Params, ///< The types of the parameters + bool isVarArg ///< Whether this is a variable argument length function + ); + + /// isValidReturnType - Return true if the specified type is valid as a return + /// type. + static bool isValidReturnType(const Type *RetTy); + + inline bool isVarArg() const { return isVarArgs; } + inline const Type *getReturnType() const { return ContainedTys[0]; } + + typedef Type::subtype_iterator param_iterator; + param_iterator param_begin() const { return ContainedTys + 1; } + param_iterator param_end() const { return &ContainedTys[NumContainedTys]; } + + // Parameter type accessors... + const Type *getParamType(unsigned i) const { return ContainedTys[i+1]; } + + /// getNumParams - Return the number of fixed parameters this function type + /// requires. This does not consider varargs. + /// + unsigned getNumParams() const { return NumContainedTys - 1; } + + // Implement the AbstractTypeUser interface. + virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + virtual void typeBecameConcrete(const DerivedType *AbsTy); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FunctionType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == FunctionTyID; + } +}; + + +/// CompositeType - Common super class of ArrayType, StructType, PointerType +/// and VectorType +class CompositeType : public DerivedType { +protected: + inline explicit CompositeType(TypeID id) : DerivedType(id) { } +public: + + /// getTypeAtIndex - Given an index value into the type, return the type of + /// the element. + /// + virtual const Type *getTypeAtIndex(const Value *V) const = 0; + virtual const Type *getTypeAtIndex(unsigned Idx) const = 0; + virtual bool indexValid(const Value *V) const = 0; + virtual bool indexValid(unsigned Idx) const = 0; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const CompositeType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == ArrayTyID || + T->getTypeID() == StructTyID || + T->getTypeID() == PointerTyID || + T->getTypeID() == VectorTyID; + } +}; + + +/// StructType - Class to represent struct types +/// +class StructType : public CompositeType { + friend class TypeMap<StructValType, StructType>; + StructType(const StructType &); // Do not implement + const StructType &operator=(const StructType &); // Do not implement + StructType(const std::vector<const Type*> &Types, bool isPacked); +public: + /// StructType::get - This static method is the primary way to create a + /// StructType. + /// + static StructType *get(const std::vector<const Type*> &Params, + bool isPacked=false); + + /// StructType::get - This static method is a convenience method for + /// creating structure types by specifying the elements as arguments. + /// Note that this method always returns a non-packed struct. To get + /// an empty struct, pass NULL, NULL. + static StructType *get(const Type *type, ...) 
END_WITH_NULL; + + // Iterator access to the elements + typedef Type::subtype_iterator element_iterator; + element_iterator element_begin() const { return ContainedTys; } + element_iterator element_end() const { return &ContainedTys[NumContainedTys];} + + // Random access to the elements + unsigned getNumElements() const { return NumContainedTys; } + const Type *getElementType(unsigned N) const { + assert(N < NumContainedTys && "Element number out of range!"); + return ContainedTys[N]; + } + + /// getTypeAtIndex - Given an index value into the type, return the type of + /// the element. For a structure type, this must be a constant value... + /// + virtual const Type *getTypeAtIndex(const Value *V) const; + virtual const Type *getTypeAtIndex(unsigned Idx) const; + virtual bool indexValid(const Value *V) const; + virtual bool indexValid(unsigned Idx) const; + + // Implement the AbstractTypeUser interface. + virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + virtual void typeBecameConcrete(const DerivedType *AbsTy); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const StructType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == StructTyID; + } + + bool isPacked() const { return (0 != getSubclassData()) ? true : false; } +}; + + +/// SequentialType - This is the superclass of the array, pointer and vector +/// type classes. All of these represent "arrays" in memory. The array type +/// represents a specifically sized array, pointer types are unsized/unknown +/// size arrays, vector types represent specifically sized arrays that +/// allow for use of SIMD instructions. SequentialType holds the common +/// features of all, which stem from the fact that all three lay their +/// components out in memory identically. +/// +class SequentialType : public CompositeType { + PATypeHandle ContainedType; ///< Storage for the single contained type + SequentialType(const SequentialType &); // Do not implement! + const SequentialType &operator=(const SequentialType &); // Do not implement! + + // avoiding warning: 'this' : used in base member initializer list + SequentialType* this_() { return this; } +protected: + SequentialType(TypeID TID, const Type *ElType) + : CompositeType(TID), ContainedType(ElType, this_()) { + ContainedTys = &ContainedType; + NumContainedTys = 1; + } + +public: + inline const Type *getElementType() const { return ContainedTys[0]; } + + virtual bool indexValid(const Value *V) const; + virtual bool indexValid(unsigned) const { + return true; + } + + /// getTypeAtIndex - Given an index value into the type, return the type of + /// the element. For sequential types, there is only one subtype... 
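A sketch using the FunctionType and StructType factory methods shown above; the element choices are arbitrary, and PointerType::getUnqual is declared a little further down in this header.

#include "llvm/DerivedTypes.h"
#include <vector>
using namespace llvm;

void makeAggregateTypes() {
  // i32 (i8*, i32, ...) - a vararg function type.
  std::vector<const Type*> Params;
  Params.push_back(PointerType::getUnqual(IntegerType::get(8)));
  Params.push_back(IntegerType::get(32));
  FunctionType *FTy = FunctionType::get(IntegerType::get(32), Params,
                                        /*isVarArg=*/true);

  // { i8*, i32 } - an unpacked struct with the same two members.
  StructType *STy = StructType::get(Params, /*isPacked=*/false);

  (void)FTy->getNumParams();      // 2; varargs are not counted
  (void)STy->getElementType(1);   // i32
}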
+ /// + virtual const Type *getTypeAtIndex(const Value *) const { + return ContainedTys[0]; + } + virtual const Type *getTypeAtIndex(unsigned) const { + return ContainedTys[0]; + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SequentialType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == ArrayTyID || + T->getTypeID() == PointerTyID || + T->getTypeID() == VectorTyID; + } +}; + + +/// ArrayType - Class to represent array types +/// +class ArrayType : public SequentialType { + friend class TypeMap<ArrayValType, ArrayType>; + uint64_t NumElements; + + ArrayType(const ArrayType &); // Do not implement + const ArrayType &operator=(const ArrayType &); // Do not implement + ArrayType(const Type *ElType, uint64_t NumEl); +public: + /// ArrayType::get - This static method is the primary way to construct an + /// ArrayType + /// + static ArrayType *get(const Type *ElementType, uint64_t NumElements); + + inline uint64_t getNumElements() const { return NumElements; } + + // Implement the AbstractTypeUser interface. + virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + virtual void typeBecameConcrete(const DerivedType *AbsTy); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ArrayType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == ArrayTyID; + } +}; + +/// VectorType - Class to represent vector types +/// +class VectorType : public SequentialType { + friend class TypeMap<VectorValType, VectorType>; + unsigned NumElements; + + VectorType(const VectorType &); // Do not implement + const VectorType &operator=(const VectorType &); // Do not implement + VectorType(const Type *ElType, unsigned NumEl); +public: + /// VectorType::get - This static method is the primary way to construct an + /// VectorType + /// + static VectorType *get(const Type *ElementType, unsigned NumElements); + + /// VectorType::getInteger - This static method gets a VectorType with the + /// same number of elements as the input type, and the element type is an + /// integer type of the same width as the input element type. + /// + static VectorType *getInteger(const VectorType *VTy) { + unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); + const Type *EltTy = IntegerType::get(EltBits); + return VectorType::get(EltTy, VTy->getNumElements()); + } + + /// VectorType::getExtendedElementVectorType - This static method is like + /// getInteger except that the element types are twice as wide as the + /// elements in the input type. + /// + static VectorType *getExtendedElementVectorType(const VectorType *VTy) { + unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); + const Type *EltTy = IntegerType::get(EltBits * 2); + return VectorType::get(EltTy, VTy->getNumElements()); + } + + /// VectorType::getTruncatedElementVectorType - This static method is like + /// getInteger except that the element types are half as wide as the + /// elements in the input type. + /// + static VectorType *getTruncatedElementVectorType(const VectorType *VTy) { + unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); + assert((EltBits & 1) == 0 && + "Cannot truncate vector element with odd bit-width"); + const Type *EltTy = IntegerType::get(EltBits / 2); + return VectorType::get(EltTy, VTy->getNumElements()); + } + + /// @brief Return the number of elements in the Vector type. 
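Similarly, a sketch for the sequential types above (element type and counts are arbitrary examples):

#include "llvm/DerivedTypes.h"
using namespace llvm;

void makeSequentialTypes() {
  const Type *I16 = IntegerType::get(16);

  ArrayType  *A = ArrayType::get(I16, 64);    // [64 x i16]
  VectorType *V = VectorType::get(I16, 8);    // <8 x i16>, 128 bits wide

  // Widen the element type: <8 x i16> becomes <8 x i32>.
  VectorType *VWide = VectorType::getExtendedElementVectorType(V);

  (void)A->getNumElements();                  // 64
  (void)V->getBitWidth();                     // 8 * 16 = 128
  (void)VWide;
}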
+ inline unsigned getNumElements() const { return NumElements; } + + /// @brief Return the number of bits in the Vector type. + inline unsigned getBitWidth() const { + return NumElements *getElementType()->getPrimitiveSizeInBits(); + } + + // Implement the AbstractTypeUser interface. + virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + virtual void typeBecameConcrete(const DerivedType *AbsTy); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const VectorType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == VectorTyID; + } +}; + + +/// PointerType - Class to represent pointers +/// +class PointerType : public SequentialType { + friend class TypeMap<PointerValType, PointerType>; + unsigned AddressSpace; + + PointerType(const PointerType &); // Do not implement + const PointerType &operator=(const PointerType &); // Do not implement + explicit PointerType(const Type *ElType, unsigned AddrSpace); +public: + /// PointerType::get - This constructs a pointer to an object of the specified + /// type in a numbered address space. + static PointerType *get(const Type *ElementType, unsigned AddressSpace); + + /// PointerType::getUnqual - This constructs a pointer to an object of the + /// specified type in the generic address space (address space zero). + static PointerType *getUnqual(const Type *ElementType) { + return PointerType::get(ElementType, 0); + } + + /// @brief Return the address space of the Pointer type. + inline unsigned getAddressSpace() const { return AddressSpace; } + + // Implement the AbstractTypeUser interface. + virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + virtual void typeBecameConcrete(const DerivedType *AbsTy); + + // Implement support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const PointerType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == PointerTyID; + } +}; + + +/// OpaqueType - Class to represent abstract types +/// +class OpaqueType : public DerivedType { + OpaqueType(const OpaqueType &); // DO NOT IMPLEMENT + const OpaqueType &operator=(const OpaqueType &); // DO NOT IMPLEMENT + OpaqueType(); +public: + /// OpaqueType::get - Static factory method for the OpaqueType class... + /// + static OpaqueType *get() { + return new OpaqueType(); // All opaque types are distinct + } + + // Implement support for type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const OpaqueType *) { return true; } + static inline bool classof(const Type *T) { + return T->getTypeID() == OpaqueTyID; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h new file mode 100644 index 0000000..0d171f6 --- /dev/null +++ b/include/llvm/ExecutionEngine/ExecutionEngine.h @@ -0,0 +1,354 @@ +//===- ExecutionEngine.h - Abstract Execution Engine Interface --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the abstract interface that implements execution support +// for LLVM. 
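Rounding out DerivedTypes.h before the ExecutionEngine interface begins, a short sketch of PointerType and OpaqueType; the address space number is an arbitrary example, and the refinement step follows the refineAbstractTypeTo contract described earlier.

#include "llvm/DerivedTypes.h"
using namespace llvm;

void pointerAndOpaqueTypes() {
  // i32 addrspace(1)* and a generic (address space 0) i32*.
  PointerType *P1 = PointerType::get(IntegerType::get(32), 1);
  PointerType *P0 = PointerType::getUnqual(IntegerType::get(32));
  (void)P1->getAddressSpace();                // 1
  (void)P0;

  // An opaque type can later be resolved to a concrete type; all of its users
  // are rewritten and the OpaqueType itself is deleted in the process.
  OpaqueType *Op = OpaqueType::get();         // every call creates a new type
  Op->refineAbstractTypeTo(IntegerType::get(64));
  // 'Op' must not be used after this point.
}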
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTION_ENGINE_H +#define LLVM_EXECUTION_ENGINE_H + +#include <vector> +#include <map> +#include <string> +#include "llvm/ADT/SmallVector.h" +#include "llvm/System/Mutex.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +struct GenericValue; +class Constant; +class Function; +class GlobalVariable; +class GlobalValue; +class Module; +class ModuleProvider; +class TargetData; +class Type; +class MutexGuard; +class JITMemoryManager; +class MachineCodeInfo; + +class ExecutionEngineState { +private: + /// GlobalAddressMap - A mapping between LLVM global values and their + /// actualized version... + std::map<const GlobalValue*, void *> GlobalAddressMap; + + /// GlobalAddressReverseMap - This is the reverse mapping of GlobalAddressMap, + /// used to convert raw addresses into the LLVM global value that is emitted + /// at the address. This map is not computed unless getGlobalValueAtAddress + /// is called at some point. + std::map<void *, const GlobalValue*> GlobalAddressReverseMap; + +public: + std::map<const GlobalValue*, void *> & + getGlobalAddressMap(const MutexGuard &) { + return GlobalAddressMap; + } + + std::map<void*, const GlobalValue*> & + getGlobalAddressReverseMap(const MutexGuard &) { + return GlobalAddressReverseMap; + } +}; + + +class ExecutionEngine { + const TargetData *TD; + ExecutionEngineState state; + bool LazyCompilationDisabled; + bool GVCompilationDisabled; + bool SymbolSearchingDisabled; + bool DlsymStubsEnabled; + +protected: + /// Modules - This is a list of ModuleProvider's that we are JIT'ing from. We + /// use a smallvector to optimize for the case where there is only one module. + SmallVector<ModuleProvider*, 1> Modules; + + void setTargetData(const TargetData *td) { + TD = td; + } + + /// getMemoryforGV - Allocate memory for a global variable. + virtual char* getMemoryForGV(const GlobalVariable* GV); + + // To avoid having libexecutionengine depend on the JIT and interpreter + // libraries, the JIT and Interpreter set these functions to ctor pointers + // at startup time if they are linked in. + typedef ExecutionEngine *(*EECtorFn)(ModuleProvider*, std::string*, + CodeGenOpt::Level OptLevel); + static EECtorFn JITCtor, InterpCtor; + + /// LazyFunctionCreator - If an unknown function is needed, this function + /// pointer is invoked to create it. If this returns null, the JIT will abort. + void* (*LazyFunctionCreator)(const std::string &); + + /// ExceptionTableRegister - If Exception Handling is set, the JIT will + /// register dwarf tables with this function + typedef void (*EERegisterFn)(void*); + static EERegisterFn ExceptionTableRegister; + +public: + /// lock - This lock is protects the ExecutionEngine, JIT, JITResolver and + /// JITEmitter classes. It must be held while changing the internal state of + /// any of those classes. + sys::Mutex lock; // Used to make this class and subclasses thread-safe + + //===--------------------------------------------------------------------===// + // ExecutionEngine Startup + //===--------------------------------------------------------------------===// + + virtual ~ExecutionEngine(); + + /// create - This is the factory method for creating an execution engine which + /// is appropriate for the current machine. This takes ownership of the + /// module provider. 
+  static ExecutionEngine *create(ModuleProvider *MP,
+                                 bool ForceInterpreter = false,
+                                 std::string *ErrorStr = 0,
+                                 CodeGenOpt::Level OptLevel =
+                                   CodeGenOpt::Default);
+
+  /// create - This is the factory method for creating an execution engine which
+  /// is appropriate for the current machine. This takes ownership of the
+  /// module.
+  static ExecutionEngine *create(Module *M);
+
+  /// createJIT - This is the factory method for creating a JIT for the current
+  /// machine; it does not fall back to the interpreter. This takes ownership
+  /// of the ModuleProvider and JITMemoryManager if successful.
+  static ExecutionEngine *createJIT(ModuleProvider *MP,
+                                    std::string *ErrorStr = 0,
+                                    JITMemoryManager *JMM = 0,
+                                    CodeGenOpt::Level OptLevel =
+                                      CodeGenOpt::Default);
+
+  /// addModuleProvider - Add a ModuleProvider to the list of modules that we
+  /// can JIT from. Note that this takes ownership of the ModuleProvider: when
+  /// the ExecutionEngine is destroyed, it destroys the MP as well.
+  virtual void addModuleProvider(ModuleProvider *P) {
+    Modules.push_back(P);
+  }
+
+  //===----------------------------------------------------------------------===//
+
+  const TargetData *getTargetData() const { return TD; }
+
+
+  /// removeModuleProvider - Remove a ModuleProvider from the list of modules.
+  /// Releases the Module from the ModuleProvider, materializing it in the
+  /// process, and returns the materialized Module.
+  virtual Module* removeModuleProvider(ModuleProvider *P,
+                                       std::string *ErrInfo = 0);
+
+  /// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
+  /// and deletes the ModuleProvider and owned Module. Avoids materializing
+  /// the underlying module.
+  virtual void deleteModuleProvider(ModuleProvider *P, std::string *ErrInfo = 0);
+
+  /// FindFunctionNamed - Search all of the active modules to find the one that
+  /// defines FnName. This is a very slow operation and shouldn't be used for
+  /// general code.
+  Function *FindFunctionNamed(const char *FnName);
+
+  /// runFunction - Execute the specified function with the specified arguments,
+  /// and return the result.
+  ///
+  virtual GenericValue runFunction(Function *F,
+                                   const std::vector<GenericValue> &ArgValues) = 0;
+
+  /// runStaticConstructorsDestructors - This method is used to execute all of
+  /// the static constructors or destructors for a program, depending on the
+  /// value of isDtors.
+  void runStaticConstructorsDestructors(bool isDtors);
+  /// runStaticConstructorsDestructors - This method is used to execute all of
+  /// the static constructors or destructors for a module, depending on the
+  /// value of isDtors.
+  void runStaticConstructorsDestructors(Module *module, bool isDtors);
+
+
+  /// runFunctionAsMain - This is a helper function which wraps runFunction to
+  /// handle the common task of starting up main with the specified argc, argv,
+  /// and envp parameters.
+  int runFunctionAsMain(Function *Fn, const std::vector<std::string> &argv,
+                        const char * const * envp);
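A minimal sketch of bringing up an engine with the factory methods above and running code. The ModuleProvider and the two Functions are assumed to come from elsewhere (for example the bitcode reader); the argument values are illustrative only.

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include <string>
#include <vector>
using namespace llvm;

int runProgram(ModuleProvider *MP, Function *MainFn, Function *HelperFn) {
  std::string Err;
  ExecutionEngine *EE = ExecutionEngine::create(MP, /*ForceInterpreter=*/false,
                                                &Err);
  if (!EE)
    return -1;                                   // Err holds the reason

  EE->runStaticConstructorsDestructors(false);   // run static ctors first

  // Run main(argc, argv) through the convenience wrapper.
  std::vector<std::string> Argv;
  Argv.push_back("program");
  int Result = EE->runFunctionAsMain(MainFn, Argv, 0);

  // Or call an arbitrary function with explicit GenericValue arguments
  // (GenericValue is defined in llvm/ExecutionEngine/GenericValue.h).
  std::vector<GenericValue> Args(1);
  Args[0].IntVal = APInt(32, 7);
  GenericValue Ret = EE->runFunction(HelperFn, Args);
  (void)Ret;

  EE->runStaticConstructorsDestructors(true);    // then static dtors
  return Result;
}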
+
+
+  /// addGlobalMapping - Tell the execution engine that the specified global is
+  /// at the specified location. This is used internally as functions are JIT'd
+  /// and as global variables are laid out in memory. It can and should also be
+  /// used by clients of the EE that want to have an LLVM global overlay
+  /// existing data in memory. After adding a mapping for GV, you must not
+  /// destroy it until you've removed the mapping.
+  void addGlobalMapping(const GlobalValue *GV, void *Addr);
+
+  /// clearAllGlobalMappings - Clear all global mappings and start over again.
+  /// Use this in dynamic compilation scenarios when you want to move globals.
+  void clearAllGlobalMappings();
+
+  /// clearGlobalMappingsFromModule - Clear all global mappings that came from a
+  /// particular module, because it has been removed from the JIT.
+  void clearGlobalMappingsFromModule(Module *M);
+
+  /// updateGlobalMapping - Replace an existing mapping for GV with a new
+  /// address. This updates both maps as required. If "Addr" is null, the
+  /// entry for the global is removed from the mappings. This returns the old
+  /// value of the pointer, or null if it was not in the map.
+  void *updateGlobalMapping(const GlobalValue *GV, void *Addr);
+
+  /// getPointerToGlobalIfAvailable - This returns the address of the specified
+  /// global value if it has already been codegen'd, otherwise it returns
+  /// null.
+  ///
+  void *getPointerToGlobalIfAvailable(const GlobalValue *GV);
+
+  /// getPointerToGlobal - This returns the address of the specified global
+  /// value. This may involve code generation if it's a function. After
+  /// getting a pointer to GV, it and all globals it transitively refers to have
+  /// been passed to addGlobalMapping. You must clear the mapping for each
+  /// referred-to global before destroying it. If a referred-to global RTG is a
+  /// function and this ExecutionEngine is a JIT compiler, calling
+  /// updateGlobalMapping(RTG, 0) will leak the function's machine code, so you
+  /// should call freeMachineCodeForFunction(RTG) instead. Note that
+  /// optimizations can move and delete non-external GlobalValues without
+  /// notifying the ExecutionEngine.
+  ///
+  void *getPointerToGlobal(const GlobalValue *GV);
+
+  /// getPointerToFunction - The different EEs represent function bodies in
+  /// different ways. They should each implement this to say what a function
+  /// pointer should look like. See getPointerToGlobal for the requirements on
+  /// destroying F and any GlobalValues it refers to.
+  ///
+  virtual void *getPointerToFunction(Function *F) = 0;
+
+  /// getPointerToFunctionOrStub - If the specified function has been
+  /// code-gen'd, return a pointer to the function. If not, compile it, or use
+  /// a stub to implement lazy compilation if available. See getPointerToGlobal
+  /// for the requirements on destroying F and any GlobalValues it refers to.
+  ///
+  virtual void *getPointerToFunctionOrStub(Function *F) {
+    // Default implementation, just codegen the function.
+    return getPointerToFunction(F);
+  }
+
+  // The JIT overrides a version that actually does this.
+  virtual void runJITOnFunction(Function *F, MachineCodeInfo *MCI = 0) { }
+
+  /// getGlobalValueAtAddress - Return the LLVM global value object that starts
+  /// at the specified address.
+  ///
+  const GlobalValue *getGlobalValueAtAddress(void *Addr);
+
+
+  void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
+                          const Type *Ty);
+  void InitializeMemory(const Constant *Init, void *Addr);
+
+  /// recompileAndRelinkFunction - This method is used to force a function
+  /// which has already been compiled to be compiled again, possibly
+  /// after it has been modified. Then the entry to the old copy is overwritten
+  /// with a branch to the new copy. If there was no old copy, this acts
+  /// just like VM::getPointerToFunction().
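As an aside, a sketch of the mapping calls declared above, e.g. overlaying an LLVM global onto memory owned by the host application; the buffer, global, and function are hypothetical.

#include "llvm/ExecutionEngine/ExecutionEngine.h"
using namespace llvm;

static char HostBuffer[64];   // storage owned by the embedding application

void mapGlobals(ExecutionEngine *EE, GlobalVariable *GV, Function *F) {
  // Make executed code see GV at HostBuffer instead of freshly allocated memory.
  EE->addGlobalMapping(GV, HostBuffer);

  // Addresses can later be mapped back to the global that lives there.
  const GlobalValue *Owner = EE->getGlobalValueAtAddress(HostBuffer);
  (void)Owner;                               // == GV

  // Force code generation for F and fetch its entry point.
  void *FnPtr = EE->getPointerToFunction(F);
  (void)FnPtr;

  // The mapping must be cleared before GV is destroyed.
  EE->updateGlobalMapping(GV, 0);
}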
+ /// + virtual void *recompileAndRelinkFunction(Function *F) = 0; + + /// freeMachineCodeForFunction - Release memory in the ExecutionEngine + /// corresponding to the machine code emitted to execute this function, useful + /// for garbage-collecting generated code. + /// + virtual void freeMachineCodeForFunction(Function *F) = 0; + + /// getOrEmitGlobalVariable - Return the address of the specified global + /// variable, possibly emitting it to memory if needed. This is used by the + /// Emitter. See getPointerToGlobal for the requirements on destroying GV and + /// any GlobalValues it refers to. + virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) { + return getPointerToGlobal((GlobalValue*)GV); + } + + /// DisableLazyCompilation - If called, the JIT will abort if lazy compilation + /// is ever attempted. + void DisableLazyCompilation(bool Disabled = true) { + LazyCompilationDisabled = Disabled; + } + bool isLazyCompilationDisabled() const { + return LazyCompilationDisabled; + } + + /// DisableGVCompilation - If called, the JIT will abort if it's asked to + /// allocate space and populate a GlobalVariable that is not internal to + /// the module. + void DisableGVCompilation(bool Disabled = true) { + GVCompilationDisabled = Disabled; + } + bool isGVCompilationDisabled() const { + return GVCompilationDisabled; + } + + /// DisableSymbolSearching - If called, the JIT will not try to lookup unknown + /// symbols with dlsym. A client can still use InstallLazyFunctionCreator to + /// resolve symbols in a custom way. + void DisableSymbolSearching(bool Disabled = true) { + SymbolSearchingDisabled = Disabled; + } + bool isSymbolSearchingDisabled() const { + return SymbolSearchingDisabled; + } + + /// EnableDlsymStubs - + void EnableDlsymStubs(bool Enabled = true) { + DlsymStubsEnabled = Enabled; + } + bool areDlsymStubsEnabled() const { + return DlsymStubsEnabled; + } + + /// InstallLazyFunctionCreator - If an unknown function is needed, the + /// specified function pointer is invoked to create it. If it returns null, + /// the JIT will abort. + void InstallLazyFunctionCreator(void* (*P)(const std::string &)) { + LazyFunctionCreator = P; + } + + /// InstallExceptionTableRegister - The JIT will use the given function + /// to register the exception tables it generates. + static void InstallExceptionTableRegister(void (*F)(void*)) { + ExceptionTableRegister = F; + } + + /// RegisterTable - Registers the given pointer as an exception table. It uses + /// the ExceptionTableRegister function. + static void RegisterTable(void* res) { + if (ExceptionTableRegister) + ExceptionTableRegister(res); + } + +protected: + explicit ExecutionEngine(ModuleProvider *P); + + void emitGlobals(); + + // EmitGlobalVariable - This method emits the specified global variable to the + // address specified in GlobalAddresses, or allocates new memory if it's not + // already in the map. 
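The lazy-compilation hooks above let an embedder resolve unknown externals itself. A sketch with a hypothetical resolver and host symbol name:

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include <string>
#include <stdint.h>
using namespace llvm;

static void hostTrace() {}      // hypothetical host function JIT'd code may call

static void *resolveSymbol(const std::string &Name) {
  if (Name == "host_trace")
    return (void*)(intptr_t)&hostTrace;
  return 0;                     // returning null makes the JIT abort
}

void configureEngine(ExecutionEngine *EE) {
  EE->DisableSymbolSearching();              // do not fall back to dlsym
  EE->InstallLazyFunctionCreator(resolveSymbol);
}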
+ void EmitGlobalVariable(const GlobalVariable *GV); + + GenericValue getConstantValue(const Constant *C); + void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr, + const Type *Ty); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ExecutionEngine/GenericValue.h b/include/llvm/ExecutionEngine/GenericValue.h new file mode 100644 index 0000000..a2fed98 --- /dev/null +++ b/include/llvm/ExecutionEngine/GenericValue.h @@ -0,0 +1,44 @@ +//===-- GenericValue.h - Represent any type of LLVM value -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The GenericValue class is used to represent an LLVM value of arbitrary type. +// +//===----------------------------------------------------------------------===// + + +#ifndef GENERIC_VALUE_H +#define GENERIC_VALUE_H + +#include "llvm/ADT/APInt.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +typedef void* PointerTy; +class APInt; + +struct GenericValue { + union { + double DoubleVal; + float FloatVal; + PointerTy PointerVal; + struct { unsigned int first; unsigned int second; } UIntPairVal; + unsigned char Untyped[8]; + }; + APInt IntVal; // also used for long doubles + + GenericValue() : DoubleVal(0.0), IntVal(1,0) {} + explicit GenericValue(void *V) : PointerVal(V), IntVal(1,0) { } +}; + +inline GenericValue PTOGV(void *P) { return GenericValue(P); } +inline void* GVTOP(const GenericValue &GV) { return GV.PointerVal; } + +} // End llvm namespace +#endif diff --git a/include/llvm/ExecutionEngine/Interpreter.h b/include/llvm/ExecutionEngine/Interpreter.h new file mode 100644 index 0000000..b2b0464 --- /dev/null +++ b/include/llvm/ExecutionEngine/Interpreter.h @@ -0,0 +1,40 @@ +//===-- Interpreter.h - Abstract Execution Engine Interface -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file forces the interpreter to link in on certain operating systems. +// (Windows). +// +//===----------------------------------------------------------------------===// + +#ifndef EXECUTION_ENGINE_INTERPRETER_H +#define EXECUTION_ENGINE_INTERPRETER_H + +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include <cstdlib> + +namespace llvm { + extern void LinkInInterpreter(); +} + +namespace { + struct ForceInterpreterLinking { + ForceInterpreterLinking() { + // We must reference the passes in such a way that compilers will not + // delete it all as dead code, even with whole program optimization, + // yet is effectively a NO-OP. As the compiler isn't smart enough + // to know that getenv() never returns -1, this will do the job. + if (std::getenv("bar") != (char*) -1) + return; + + llvm::LinkInInterpreter(); + } + } ForceInterpreterLinking; +} + +#endif diff --git a/include/llvm/ExecutionEngine/JIT.h b/include/llvm/ExecutionEngine/JIT.h new file mode 100644 index 0000000..d4d1e73 --- /dev/null +++ b/include/llvm/ExecutionEngine/JIT.h @@ -0,0 +1,40 @@ +//===-- JIT.h - Abstract Execution Engine Interface -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file forces the JIT to link in on certain operating systems. +// (Windows). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTION_ENGINE_JIT_H +#define LLVM_EXECUTION_ENGINE_JIT_H + +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include <cstdlib> + +namespace llvm { + extern void LinkInJIT(); +} + +namespace { + struct ForceJITLinking { + ForceJITLinking() { + // We must reference the passes in such a way that compilers will not + // delete it all as dead code, even with whole program optimization, + // yet is effectively a NO-OP. As the compiler isn't smart enough + // to know that getenv() never returns -1, this will do the job. + if (std::getenv("bar") != (char*) -1) + return; + + llvm::LinkInJIT(); + } + } ForceJITLinking; +} + +#endif diff --git a/include/llvm/ExecutionEngine/JITMemoryManager.h b/include/llvm/ExecutionEngine/JITMemoryManager.h new file mode 100644 index 0000000..688a162 --- /dev/null +++ b/include/llvm/ExecutionEngine/JITMemoryManager.h @@ -0,0 +1,135 @@ +//===-- JITMemoryManager.h - Interface JIT uses to Allocate Mem -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the JITMemoryManagerInterface +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H +#define LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { + class Function; + +/// JITMemoryManager - This interface is used by the JIT to allocate and manage +/// memory for the code generated by the JIT. This can be reimplemented by +/// clients that have a strong desire to control how the layout of JIT'd memory +/// works. +class JITMemoryManager { +protected: + bool HasGOT; + bool SizeRequired; +public: + JITMemoryManager() : HasGOT(false), SizeRequired(false) {} + virtual ~JITMemoryManager(); + + /// CreateDefaultMemManager - This is used to create the default + /// JIT Memory Manager if the client does not provide one to the JIT. + static JITMemoryManager *CreateDefaultMemManager(); + + /// setMemoryWritable - When code generation is in progress, + /// the code pages may need permissions changed. + virtual void setMemoryWritable(void) = 0; + + /// setMemoryExecutable - When code generation is done and we're ready to + /// start execution, the code pages may need permissions changed. + virtual void setMemoryExecutable(void) = 0; + + //===--------------------------------------------------------------------===// + // Global Offset Table Management + //===--------------------------------------------------------------------===// + + /// AllocateGOT - If the current table requires a Global Offset Table, this + /// method is invoked to allocate it. This method is required to set HasGOT + /// to true. + virtual void AllocateGOT() = 0; + + /// isManagingGOT - Return true if the AllocateGOT method is called. + /// + bool isManagingGOT() const { + return HasGOT; + } + + /// getGOTBase - If this is managing a Global Offset Table, this method should + /// return a pointer to its base. 
+ virtual uint8_t *getGOTBase() const = 0; + + /// SetDlsymTable - If the JIT must be able to relocate stubs after they have + /// been emitted, potentially because they are being copied to a process + /// where external symbols live at different addresses than in the JITing + /// process, allocate a table with sufficient information to do so. + virtual void SetDlsymTable(void *ptr) = 0; + + /// getDlsymTable - If this is managing a table of entries so that stubs to + /// external symbols can be later relocated, this method should return a + /// pointer to it. + virtual void *getDlsymTable() const = 0; + + /// NeedsExactSize - If the memory manager requires to know the size of the + /// objects to be emitted + bool NeedsExactSize() const { + return SizeRequired; + } + + //===--------------------------------------------------------------------===// + // Main Allocation Functions + //===--------------------------------------------------------------------===// + + /// startFunctionBody - When we start JITing a function, the JIT calls this + /// method to allocate a block of free RWX memory, which returns a pointer to + /// it. The JIT doesn't know ahead of time how much space it will need to + /// emit the function, so it doesn't pass in the size. Instead, this method + /// is required to pass back a "valid size". The JIT will be careful to not + /// write more than the returned ActualSize bytes of memory. + virtual uint8_t *startFunctionBody(const Function *F, + uintptr_t &ActualSize) = 0; + + /// allocateStub - This method is called by the JIT to allocate space for a + /// function stub (used to handle limited branch displacements) while it is + /// JIT compiling a function. For example, if foo calls bar, and if bar + /// either needs to be lazily compiled or is a native function that exists too + /// far away from the call site to work, this method will be used to make a + /// thunk for it. The stub should be "close" to the current function body, + /// but should not be included in the 'actualsize' returned by + /// startFunctionBody. + virtual uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize, + unsigned Alignment) = 0; + + /// endFunctionBody - This method is called when the JIT is done codegen'ing + /// the specified function. At this point we know the size of the JIT + /// compiled function. This passes in FunctionStart (which was returned by + /// the startFunctionBody method) and FunctionEnd which is a pointer to the + /// actual end of the function. This method should mark the space allocated + /// and remember where it is in case the client wants to deallocate it. + virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart, + uint8_t *FunctionEnd) = 0; + + /// allocateSpace - Allocate a memory block of the given size. + virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0; + + /// deallocateMemForFunction - Free JIT memory for the specified function. + /// This is never called when the JIT is currently emitting a function. + virtual void deallocateMemForFunction(const Function *F) = 0; + + /// startExceptionTable - When we finished JITing the function, if exception + /// handling is set, we emit the exception table. + virtual uint8_t* startExceptionTable(const Function* F, + uintptr_t &ActualSize) = 0; + + /// endExceptionTable - This method is called when the JIT is done emitting + /// the exception table. 
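To illustrate the allocation protocol above, here is roughly the sequence the JIT drives against a memory manager when emitting one function. Real clients implement the interface rather than call it directly, and the 128-byte code size is made up.

#include "llvm/ExecutionEngine/JITMemoryManager.h"
using namespace llvm;

void emitOneFunction(const Function *F) {
  JITMemoryManager *JMM = JITMemoryManager::CreateDefaultMemManager();

  JMM->setMemoryWritable();                  // about to write code
  uintptr_t ActualSize = 0;
  uint8_t *Start = JMM->startFunctionBody(F, ActualSize);

  // ... the JIT emits at most ActualSize bytes starting at Start ...
  uint8_t *End = Start + 128;                // hypothetical emitted size

  JMM->endFunctionBody(F, Start, End);
  JMM->setMemoryExecutable();                // ready to execute
  delete JMM;
}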
+ virtual void endExceptionTable(const Function *F, uint8_t *TableStart, + uint8_t *TableEnd, uint8_t* FrameRegister) = 0; +}; + +} // end namespace llvm. + +#endif diff --git a/include/llvm/Function.h b/include/llvm/Function.h new file mode 100644 index 0000000..ccc006c --- /dev/null +++ b/include/llvm/Function.h @@ -0,0 +1,412 @@ +//===-- llvm/Function.h - Class to represent a single function --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the Function class, which represents a +// single function/procedure in LLVM. +// +// A function basically consists of a list of basic blocks, a list of arguments, +// and a symbol table. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUNCTION_H +#define LLVM_FUNCTION_H + +#include "llvm/GlobalValue.h" +#include "llvm/BasicBlock.h" +#include "llvm/Argument.h" +#include "llvm/Support/Annotation.h" +#include "llvm/Attributes.h" + +namespace llvm { + +class FunctionType; + +// Traits for intrusive list of basic blocks... +template<> struct ilist_traits<BasicBlock> + : public SymbolTableListTraits<BasicBlock, Function> { + + // createSentinel is used to get hold of the node that marks the end of the + // list... (same trick used here as in ilist_traits<Instruction>) + BasicBlock *createSentinel() const { + return static_cast<BasicBlock*>(&Sentinel); + } + static void destroySentinel(BasicBlock*) {} + + BasicBlock *provideInitialHead() const { return createSentinel(); } + BasicBlock *ensureHead(BasicBlock*) const { return createSentinel(); } + static void noteHead(BasicBlock*, BasicBlock*) {} + + static ValueSymbolTable *getSymTab(Function *ItemParent); +private: + mutable ilist_node<BasicBlock> Sentinel; +}; + +template<> struct ilist_traits<Argument> + : public SymbolTableListTraits<Argument, Function> { + + Argument *createSentinel() const { + return static_cast<Argument*>(&Sentinel); + } + static void destroySentinel(Argument*) {} + + Argument *provideInitialHead() const { return createSentinel(); } + Argument *ensureHead(Argument*) const { return createSentinel(); } + static void noteHead(Argument*, Argument*) {} + + static ValueSymbolTable *getSymTab(Function *ItemParent); +private: + mutable ilist_node<Argument> Sentinel; +}; + +class Function : public GlobalValue, public Annotable, + public ilist_node<Function> { +public: + typedef iplist<Argument> ArgumentListType; + typedef iplist<BasicBlock> BasicBlockListType; + + // BasicBlock iterators... + typedef BasicBlockListType::iterator iterator; + typedef BasicBlockListType::const_iterator const_iterator; + + typedef ArgumentListType::iterator arg_iterator; + typedef ArgumentListType::const_iterator const_arg_iterator; + +private: + // Important things that make up a function! + BasicBlockListType BasicBlocks; ///< The basic blocks + mutable ArgumentListType ArgumentList; ///< The formal arguments + ValueSymbolTable *SymTab; ///< Symbol table of args/instructions + AttrListPtr AttributeList; ///< Parameter attributes + + // The Calling Convention is stored in Value::SubclassData. 
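
A minimal sketch of the allocation lifecycle that the JITMemoryManager interface above describes (illustrative only, not part of this patch). The helper name emitOneFunction, the pre-built Buf/BufSize buffer, and the use of memcpy in place of real code emission are assumptions; CreateDefaultMemManager, startFunctionBody, endFunctionBody and setMemoryExecutable are the calls declared in JITMemoryManager.h above.

    #include "llvm/ExecutionEngine/JITMemoryManager.h"
    #include <cstring>
    using namespace llvm;

    void emitOneFunction(const Function *F, const uint8_t *Buf, uintptr_t BufSize) {
      JITMemoryManager *JMM = JITMemoryManager::CreateDefaultMemManager();

      // Ask for a block of RWX memory; ActualSize reports how much may be written.
      uintptr_t ActualSize = 0;
      uint8_t *Start = JMM->startFunctionBody(F, ActualSize);

      // The JIT would emit machine code here; copying a pre-built buffer stands in
      // for real code emission and must stay within ActualSize bytes.
      uintptr_t N = BufSize <= ActualSize ? BufSize : ActualSize;
      std::memcpy(Start, Buf, N);

      // Record where the function ends, then flip the pages to executable.
      JMM->endFunctionBody(F, Start, Start + N);
      JMM->setMemoryExecutable();
      delete JMM;
    }
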
+ /*unsigned CallingConvention;*/ + + friend class SymbolTableListTraits<Function, Module>; + + void setParent(Module *parent); + + /// hasLazyArguments/CheckLazyArguments - The argument list of a function is + /// built on demand, so that the list isn't allocated until the first client + /// needs it. The hasLazyArguments predicate returns true if the arg list + /// hasn't been set up yet. + bool hasLazyArguments() const { + return SubclassData & 1; + } + void CheckLazyArguments() const { + if (hasLazyArguments()) + BuildLazyArguments(); + } + void BuildLazyArguments() const; + + Function(const Function&); // DO NOT IMPLEMENT + void operator=(const Function&); // DO NOT IMPLEMENT + + /// Function ctor - If the (optional) Module argument is specified, the + /// function is automatically inserted into the end of the function list for + /// the module. + /// + Function(const FunctionType *Ty, LinkageTypes Linkage, + const std::string &N = "", Module *M = 0); + +public: + static Function *Create(const FunctionType *Ty, LinkageTypes Linkage, + const std::string &N = "", Module *M = 0) { + return new(0) Function(Ty, Linkage, N, M); + } + + ~Function(); + + const Type *getReturnType() const; // Return the type of the ret val + const FunctionType *getFunctionType() const; // Return the FunctionType for me + + /// isVarArg - Return true if this function takes a variable number of + /// arguments. + bool isVarArg() const; + + /// isDeclaration - Is the body of this function unknown? (The basic block + /// list is empty if so.) This is true for function declarations, but not + /// true for function definitions. + /// + virtual bool isDeclaration() const { return BasicBlocks.empty(); } + + /// getIntrinsicID - This method returns the ID number of the specified + /// function, or Intrinsic::not_intrinsic if the function is not an + /// instrinsic, or if the pointer is null. This value is always defined to be + /// zero to allow easy checking for whether a function is intrinsic or not. + /// The particular intrinsic functions which correspond to this value are + /// defined in llvm/Intrinsics.h. + /// + unsigned getIntrinsicID() const; + bool isIntrinsic() const { return getIntrinsicID() != 0; } + + /// getCallingConv()/setCallingConv(uint) - These method get and set the + /// calling convention of this function. The enum values for the known + /// calling conventions are defined in CallingConv.h. + unsigned getCallingConv() const { return SubclassData >> 1; } + void setCallingConv(unsigned CC) { + SubclassData = (SubclassData & 1) | (CC << 1); + } + + /// getAttributes - Return the attribute list for this Function. + /// + const AttrListPtr &getAttributes() const { return AttributeList; } + + /// setAttributes - Set the attribute list for this Function. + /// + void setAttributes(const AttrListPtr &attrs) { AttributeList = attrs; } + + /// hasFnAttr - Return true if this function has the given attribute. + bool hasFnAttr(Attributes N) const { + // Function Attributes are stored at ~0 index + return AttributeList.paramHasAttr(~0U, N); + } + + /// addFnAttr - Add function attributes to this function. + /// + void addFnAttr(Attributes N) { + // Function Attributes are stored at ~0 index + addAttribute(~0U, N); + } + + /// removeFnAttr - Remove function attributes from this function. 
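
A sketch of creating a Function and using the calling-convention and attribute accessors shown above. The helper name makeHelper is made up; Type::Int32Ty, FunctionType::get, CallingConv::Fast and the headers DerivedTypes.h, CallingConv.h and Module.h come from other parts of this tree and are assumed here.

    #include "llvm/Function.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/CallingConv.h"
    #include "llvm/Module.h"
    #include <vector>
    using namespace llvm;

    Function *makeHelper(Module *M) {
      // i32 () -- no parameters, not varargs.
      const FunctionType *FTy =
          FunctionType::get(Type::Int32Ty, std::vector<const Type*>(), false);

      // Inserted at the end of M's function list because a module is supplied.
      Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage, "helper", M);

      F->setCallingConv(CallingConv::Fast); // packed into SubclassData above bit 0
      F->addFnAttr(Attribute::NoUnwind);    // function attributes live at index ~0U
      return F;
    }
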
+ /// + void removeFnAttr(Attributes N) { + // Function Attributes are stored at ~0 index + removeAttribute(~0U, N); + } + + /// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm + /// to use during code generation. + bool hasGC() const; + const char *getGC() const; + void setGC(const char *Str); + void clearGC(); + + /// @brief Determine whether the function has the given attribute. + bool paramHasAttr(unsigned i, Attributes attr) const { + return AttributeList.paramHasAttr(i, attr); + } + + /// addAttribute - adds the attribute to the list of attributes. + void addAttribute(unsigned i, Attributes attr); + + /// removeAttribute - removes the attribute from the list of attributes. + void removeAttribute(unsigned i, Attributes attr); + + /// @brief Extract the alignment for a call or parameter (0=unknown). + unsigned getParamAlignment(unsigned i) const { + return AttributeList.getParamAlignment(i); + } + + /// @brief Determine if the function does not access memory. + bool doesNotAccessMemory() const { + return hasFnAttr(Attribute::ReadNone); + } + void setDoesNotAccessMemory(bool DoesNotAccessMemory = true) { + if (DoesNotAccessMemory) addFnAttr(Attribute::ReadNone); + else removeFnAttr(Attribute::ReadNone); + } + + /// @brief Determine if the function does not access or only reads memory. + bool onlyReadsMemory() const { + return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly); + } + void setOnlyReadsMemory(bool OnlyReadsMemory = true) { + if (OnlyReadsMemory) addFnAttr(Attribute::ReadOnly); + else removeFnAttr(Attribute::ReadOnly | Attribute::ReadNone); + } + + /// @brief Determine if the function cannot return. + bool doesNotReturn() const { + return hasFnAttr(Attribute::NoReturn); + } + void setDoesNotReturn(bool DoesNotReturn = true) { + if (DoesNotReturn) addFnAttr(Attribute::NoReturn); + else removeFnAttr(Attribute::NoReturn); + } + + /// @brief Determine if the function cannot unwind. + bool doesNotThrow() const { + return hasFnAttr(Attribute::NoUnwind); + } + void setDoesNotThrow(bool DoesNotThrow = true) { + if (DoesNotThrow) addFnAttr(Attribute::NoUnwind); + else removeFnAttr(Attribute::NoUnwind); + } + + /// @brief Determine if the function returns a structure through first + /// pointer argument. + bool hasStructRetAttr() const { + return paramHasAttr(1, Attribute::StructRet); + } + + /// @brief Determine if the parameter does not alias other parameters. + /// @param n The parameter to check. 1 is the first parameter, 0 is the return + bool doesNotAlias(unsigned n) const { + return paramHasAttr(n, Attribute::NoAlias); + } + void setDoesNotAlias(unsigned n, bool DoesNotAlias = true) { + if (DoesNotAlias) addAttribute(n, Attribute::NoAlias); + else removeAttribute(n, Attribute::NoAlias); + } + + /// @brief Determine if the parameter can be captured. + /// @param n The parameter to check. 1 is the first parameter, 0 is the return + bool doesNotCapture(unsigned n) const { + return paramHasAttr(n, Attribute::NoCapture); + } + void setDoesNotCapture(unsigned n, bool DoesNotCapture = true) { + if (DoesNotCapture) addAttribute(n, Attribute::NoCapture); + else removeAttribute(n, Attribute::NoCapture); + } + + /// copyAttributesFrom - copy all additional attributes (those not needed to + /// create a Function) from the Function Src to this one. + void copyAttributesFrom(const GlobalValue *Src); + + /// deleteBody - This method deletes the body of the function, and converts + /// the linkage to external. 
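
The parameter-attribute helpers above use 1-based indexing: index 1 is the first parameter, while index 0 (and ~0U for function-level attributes) refers to the return value. A small sketch; the helper name annotateFirstParam is made up.

    #include "llvm/Function.h"
    using namespace llvm;

    void annotateFirstParam(Function *F) {
      // Index 1 is the first parameter, not the return value.
      F->setDoesNotAlias(1);
      F->setDoesNotCapture(1);

      if (F->hasStructRetAttr()) {
        // Result is returned through the first pointer argument (sret on param 1).
      }
      if (F->onlyReadsMemory()) {
        // True for both readonly and readnone functions.
      }
    }
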
+ /// + void deleteBody() { + dropAllReferences(); + setLinkage(ExternalLinkage); + } + + /// removeFromParent - This method unlinks 'this' from the containing module, + /// but does not delete it. + /// + virtual void removeFromParent(); + + /// eraseFromParent - This method unlinks 'this' from the containing module + /// and deletes it. + /// + virtual void eraseFromParent(); + + + /// Get the underlying elements of the Function... the basic block list is + /// empty for external functions. + /// + const ArgumentListType &getArgumentList() const { + CheckLazyArguments(); + return ArgumentList; + } + ArgumentListType &getArgumentList() { + CheckLazyArguments(); + return ArgumentList; + } + static iplist<Argument> Function::*getSublistAccess(Argument*) { + return &Function::ArgumentList; + } + + const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; } + BasicBlockListType &getBasicBlockList() { return BasicBlocks; } + static iplist<BasicBlock> Function::*getSublistAccess(BasicBlock*) { + return &Function::BasicBlocks; + } + + const BasicBlock &getEntryBlock() const { return front(); } + BasicBlock &getEntryBlock() { return front(); } + + //===--------------------------------------------------------------------===// + // Symbol Table Accessing functions... + + /// getSymbolTable() - Return the symbol table... + /// + inline ValueSymbolTable &getValueSymbolTable() { return *SymTab; } + inline const ValueSymbolTable &getValueSymbolTable() const { return *SymTab; } + + + //===--------------------------------------------------------------------===// + // BasicBlock iterator forwarding functions + // + iterator begin() { return BasicBlocks.begin(); } + const_iterator begin() const { return BasicBlocks.begin(); } + iterator end () { return BasicBlocks.end(); } + const_iterator end () const { return BasicBlocks.end(); } + + size_t size() const { return BasicBlocks.size(); } + bool empty() const { return BasicBlocks.empty(); } + const BasicBlock &front() const { return BasicBlocks.front(); } + BasicBlock &front() { return BasicBlocks.front(); } + const BasicBlock &back() const { return BasicBlocks.back(); } + BasicBlock &back() { return BasicBlocks.back(); } + + //===--------------------------------------------------------------------===// + // Argument iterator forwarding functions + // + arg_iterator arg_begin() { + CheckLazyArguments(); + return ArgumentList.begin(); + } + const_arg_iterator arg_begin() const { + CheckLazyArguments(); + return ArgumentList.begin(); + } + arg_iterator arg_end() { + CheckLazyArguments(); + return ArgumentList.end(); + } + const_arg_iterator arg_end() const { + CheckLazyArguments(); + return ArgumentList.end(); + } + + size_t arg_size() const; + bool arg_empty() const; + + /// viewCFG - This function is meant for use from the debugger. You can just + /// say 'call F->viewCFG()' and a ghostview window should pop up from the + /// program, displaying the CFG of the current function with the code for each + /// basic block inside. This depends on there being a 'dot' and 'gv' program + /// in your path. + /// + void viewCFG() const; + + /// viewCFGOnly - This function is meant for use from the debugger. It works + /// just like viewCFG, but it does not include the contents of basic blocks + /// into the nodes, just the label. If you are only interested in the CFG + /// this can make the graph smaller. 
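
Since Function forwards iterators for its basic-block list and its lazily built argument list, a typical traversal needs nothing beyond the accessors above. A sketch; the helper name countBlocksAndArgs is made up.

    #include "llvm/Function.h"
    using namespace llvm;

    unsigned countBlocksAndArgs(const Function &F, unsigned &NumArgs) {
      NumArgs = 0;
      for (Function::const_arg_iterator AI = F.arg_begin(), AE = F.arg_end();
           AI != AE; ++AI)
        ++NumArgs;          // arg_begin() builds the argument list on demand

      unsigned NumBlocks = 0;
      for (Function::const_iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
        ++NumBlocks;        // empty when isDeclaration() is true
      return NumBlocks;
    }
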
+ /// + void viewCFGOnly() const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const Function *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == Value::FunctionVal; + } + + /// dropAllReferences() - This method causes all the subinstructions to "let + /// go" of all references that they are maintaining. This allows one to + /// 'delete' a whole module at a time, even though there may be circular + /// references... first all references are dropped, and all use counts go to + /// zero. Then everything is deleted for real. Note that no operations are + /// valid on an object that has "dropped all references", except operator + /// delete. + /// + /// Since no other object in the module can have references into the body of a + /// function, dropping all references deletes the entire body of the function, + /// including any contained basic blocks. + /// + void dropAllReferences(); +}; + +inline ValueSymbolTable * +ilist_traits<BasicBlock>::getSymTab(Function *F) { + return F ? &F->getValueSymbolTable() : 0; +} + +inline ValueSymbolTable * +ilist_traits<Argument>::getSymTab(Function *F) { + return F ? &F->getValueSymbolTable() : 0; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/GlobalAlias.h b/include/llvm/GlobalAlias.h new file mode 100644 index 0000000..b106116 --- /dev/null +++ b/include/llvm/GlobalAlias.h @@ -0,0 +1,98 @@ +//===-------- llvm/GlobalAlias.h - GlobalAlias class ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the GlobalAlias class, which +// represents a single function or variable alias in the IR. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_GLOBAL_ALIAS_H +#define LLVM_GLOBAL_ALIAS_H + +#include "llvm/GlobalValue.h" +#include "llvm/OperandTraits.h" +#include "llvm/ADT/ilist_node.h" + +namespace llvm { + +class Module; +class Constant; +template<typename ValueSubClass, typename ItemParentClass> + class SymbolTableListTraits; + +class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> { + friend class SymbolTableListTraits<GlobalAlias, Module>; + void operator=(const GlobalAlias &); // Do not implement + GlobalAlias(const GlobalAlias &); // Do not implement + + void setParent(Module *parent); + +public: + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 1); + } + /// GlobalAlias ctor - If a parent module is specified, the alias is + /// automatically inserted into the end of the specified module's alias list. + GlobalAlias(const Type *Ty, LinkageTypes Linkage, const std::string &Name = "", + Constant* Aliasee = 0, Module *Parent = 0); + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// isDeclaration - Is this global variable lacking an initializer? If so, + /// the global variable is defined in some other translation unit, and is thus + /// only a declaration here. + virtual bool isDeclaration() const; + + /// removeFromParent - This method unlinks 'this' from the containing module, + /// but does not delete it. 
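
A sketch of creating an alias with the GlobalAlias constructor above; the aliasee and module are assumed to exist already, and the helper name makeAlias and the include of GlobalVariable.h are illustrative.

    #include "llvm/GlobalAlias.h"
    #include "llvm/GlobalVariable.h"
    #include "llvm/Module.h"
    using namespace llvm;

    GlobalAlias *makeAlias(GlobalVariable *GV, Module *M) {
      // The alias shares its aliasee's (pointer) type and is appended to
      // M's alias list because a parent module is supplied.
      return new GlobalAlias(GV->getType(), GlobalValue::ExternalLinkage,
                             "an_alias", GV, M);
    }
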
+ /// + virtual void removeFromParent(); + + /// eraseFromParent - This method unlinks 'this' from the containing module + /// and deletes it. + /// + virtual void eraseFromParent(); + + /// set/getAliasee - These methods retrive and set alias target. + void setAliasee(Constant* GV); + const Constant* getAliasee() const { + return cast_or_null<Constant>(getOperand(0)); + } + Constant* getAliasee() { + return cast_or_null<Constant>(getOperand(0)); + } + /// getAliasedGlobal() - Aliasee can be either global or bitcast of + /// global. This method retrives the global for both aliasee flavours. + const GlobalValue* getAliasedGlobal() const; + + /// resolveAliasedGlobal() - This method tries to ultimately resolve the alias + /// by going through the aliasing chain and trying to find the very last + /// global. Returns NULL if a cycle was found. If stopOnWeak is false, then + /// the whole chain aliasing chain is traversed, otherwise - only strong + /// aliases. + const GlobalValue* resolveAliasedGlobal(bool stopOnWeak = true) const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const GlobalAlias *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == Value::GlobalAliasVal; + } +}; + +template <> +struct OperandTraits<GlobalAlias> : FixedNumOperandTraits<1> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalAlias, Value) + +} // End llvm namespace + +#endif diff --git a/include/llvm/GlobalValue.h b/include/llvm/GlobalValue.h new file mode 100644 index 0000000..3b7f67d --- /dev/null +++ b/include/llvm/GlobalValue.h @@ -0,0 +1,211 @@ +//===-- llvm/GlobalValue.h - Class to represent a global value --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a common base class of all globally definable objects. As such, +// it is subclassed by GlobalVariable, GlobalAlias and by Function. This is +// used because you can do certain things with these global objects that you +// can't do to anything else. For example, use the address of one as a +// constant. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_GLOBALVALUE_H +#define LLVM_GLOBALVALUE_H + +#include "llvm/Constant.h" + +namespace llvm { + +class PointerType; +class Module; + +class GlobalValue : public Constant { + GlobalValue(const GlobalValue &); // do not implement +public: + /// @brief An enumeration for the kinds of linkage for global values. + enum LinkageTypes { + ExternalLinkage = 0,///< Externally visible function + AvailableExternallyLinkage, ///< Available for inspection, not emission. + LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline) + LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent. + WeakAnyLinkage, ///< Keep one copy of named function when linking (weak) + WeakODRLinkage, ///< Same, but only replaced by something equivalent. 
+ AppendingLinkage, ///< Special purpose, only applies to global arrays + InternalLinkage, ///< Rename collisions when linking (static functions) + PrivateLinkage, ///< Like Internal, but omit from symbol table + DLLImportLinkage, ///< Function to be imported from DLL + DLLExportLinkage, ///< Function to be accessible from DLL + ExternalWeakLinkage,///< ExternalWeak linkage description + GhostLinkage, ///< Stand-in functions for streaming fns from BC files + CommonLinkage ///< Tentative definitions + }; + + /// @brief An enumeration for the kinds of visibility of global values. + enum VisibilityTypes { + DefaultVisibility = 0, ///< The GV is visible + HiddenVisibility, ///< The GV is hidden + ProtectedVisibility ///< The GV is protected + }; + +protected: + GlobalValue(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps, + LinkageTypes linkage, const std::string &name = "") + : Constant(ty, vty, Ops, NumOps), Parent(0), + Linkage(linkage), Visibility(DefaultVisibility), Alignment(0) { + if (!name.empty()) setName(name); + } + + Module *Parent; + // Note: VC++ treats enums as signed, so an extra bit is required to prevent + // Linkage and Visibility from turning into negative values. + LinkageTypes Linkage : 5; // The linkage of this global + unsigned Visibility : 2; // The visibility style of this global + unsigned Alignment : 16; // Alignment of this symbol, must be power of two + std::string Section; // Section to emit this into, empty mean default +public: + ~GlobalValue() { + removeDeadConstantUsers(); // remove any dead constants using this. + } + + unsigned getAlignment() const { return Alignment; } + void setAlignment(unsigned Align) { + assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); + Alignment = Align; + } + + VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); } + bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; } + bool hasProtectedVisibility() const { + return Visibility == ProtectedVisibility; + } + void setVisibility(VisibilityTypes V) { Visibility = V; } + + bool hasSection() const { return !Section.empty(); } + const std::string &getSection() const { return Section; } + void setSection(const std::string &S) { Section = S; } + + /// If the usage is empty (except transitively dead constants), then this + /// global value can can be safely deleted since the destructor will + /// delete the dead constants as well. + /// @brief Determine if the usage of this global value is empty except + /// for transitively dead constants. + bool use_empty_except_constants(); + + /// getType - Global values are always pointers. + inline const PointerType *getType() const { + return reinterpret_cast<const PointerType*>(User::getType()); + } + + static LinkageTypes getLinkOnceLinkage(bool ODR) { + return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage; + } + static LinkageTypes getWeakLinkage(bool ODR) { + return ODR ? 
WeakODRLinkage : WeakAnyLinkage; + } + + bool hasExternalLinkage() const { return Linkage == ExternalLinkage; } + bool hasAvailableExternallyLinkage() const { + return Linkage == AvailableExternallyLinkage; + } + bool hasLinkOnceLinkage() const { + return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage; + } + bool hasWeakLinkage() const { + return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage; + } + bool hasAppendingLinkage() const { return Linkage == AppendingLinkage; } + bool hasInternalLinkage() const { return Linkage == InternalLinkage; } + bool hasPrivateLinkage() const { return Linkage == PrivateLinkage; } + bool hasLocalLinkage() const { + return Linkage == InternalLinkage || Linkage == PrivateLinkage; + } + bool hasDLLImportLinkage() const { return Linkage == DLLImportLinkage; } + bool hasDLLExportLinkage() const { return Linkage == DLLExportLinkage; } + bool hasExternalWeakLinkage() const { return Linkage == ExternalWeakLinkage; } + bool hasGhostLinkage() const { return Linkage == GhostLinkage; } + bool hasCommonLinkage() const { return Linkage == CommonLinkage; } + + void setLinkage(LinkageTypes LT) { Linkage = LT; } + LinkageTypes getLinkage() const { return Linkage; } + + /// mayBeOverridden - Whether the definition of this global may be replaced + /// by something non-equivalent at link time. For example, if a function has + /// weak linkage then the code defining it may be replaced by different code. + bool mayBeOverridden() const { + return (Linkage == WeakAnyLinkage || + Linkage == LinkOnceAnyLinkage || + Linkage == CommonLinkage || + Linkage == ExternalWeakLinkage); + } + + /// isWeakForLinker - Whether the definition of this global may be replaced at + /// link time. + bool isWeakForLinker() const { + return (Linkage == AvailableExternallyLinkage || + Linkage == WeakAnyLinkage || + Linkage == WeakODRLinkage || + Linkage == LinkOnceAnyLinkage || + Linkage == LinkOnceODRLinkage || + Linkage == CommonLinkage || + Linkage == ExternalWeakLinkage); + } + + /// copyAttributesFrom - copy all additional attributes (those not needed to + /// create a GlobalValue) from the GlobalValue Src to this one. + virtual void copyAttributesFrom(const GlobalValue *Src); + + /// hasNotBeenReadFromBitcode - If a module provider is being used to lazily + /// stream in functions from disk, this method can be used to check to see if + /// the function has been read in yet or not. Unless you are working on the + /// JIT or something else that streams stuff in lazily, you don't need to + /// worry about this. + bool hasNotBeenReadFromBitcode() const { return Linkage == GhostLinkage; } + + /// Override from Constant class. No GlobalValue's are null values so this + /// always returns false. + virtual bool isNullValue() const { return false; } + + /// Override from Constant class. + virtual void destroyConstant(); + + /// isDeclaration - Return true if the primary definition of this global + /// value is outside of the current translation unit... + virtual bool isDeclaration() const = 0; + + /// removeFromParent - This method unlinks 'this' from the containing module, + /// but does not delete it. + virtual void removeFromParent() = 0; + + /// eraseFromParent - This method unlinks 'this' from the containing module + /// and deletes it. + virtual void eraseFromParent() = 0; + + /// getParent - Get the module that this global value is contained inside + /// of... 
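
A small sketch of the linkage predicates above, deciding whether a global's definition can be trusted not to change at link time; the helper name hasStableLocalDefinition is made up.

    #include "llvm/GlobalValue.h"
    using namespace llvm;

    // True if this module's definition (if any) is the one the linker will use
    // and no other translation unit can replace it with different code.
    bool hasStableLocalDefinition(const GlobalValue *GV) {
      if (GV->isDeclaration())
        return false;                    // the body lives elsewhere
      if (GV->hasLocalLinkage())
        return true;                     // internal/private: never replaced
      return !GV->mayBeOverridden();     // e.g. weak or common may be replaced
    }
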
+ inline Module *getParent() { return Parent; } + inline const Module *getParent() const { return Parent; } + + /// removeDeadConstantUsers - If there are any dead constant users dangling + /// off of this global value, remove them. This method is useful for clients + /// that want to check to see if a global is unused, but don't want to deal + /// with potentially dead constants hanging off of the globals. + void removeDeadConstantUsers() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const GlobalValue *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == Value::FunctionVal || + V->getValueID() == Value::GlobalVariableVal || + V->getValueID() == Value::GlobalAliasVal; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/GlobalVariable.h b/include/llvm/GlobalVariable.h new file mode 100644 index 0000000..ae64ccf --- /dev/null +++ b/include/llvm/GlobalVariable.h @@ -0,0 +1,159 @@ +//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the GlobalVariable class, which +// represents a single global variable (or constant) in the VM. +// +// Global variables are constant pointers that refer to hunks of space that are +// allocated by either the VM, or by the linker in a static compiler. A global +// variable may have an intial value, which is copied into the executables .data +// area. Global Constants are required to have initializers. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_GLOBAL_VARIABLE_H +#define LLVM_GLOBAL_VARIABLE_H + +#include "llvm/GlobalValue.h" +#include "llvm/OperandTraits.h" +#include "llvm/ADT/ilist_node.h" + +namespace llvm { + +class Module; +class Constant; +template<typename ValueSubClass, typename ItemParentClass> + class SymbolTableListTraits; + +class GlobalVariable : public GlobalValue, public ilist_node<GlobalVariable> { + friend class SymbolTableListTraits<GlobalVariable, Module>; + void *operator new(size_t, unsigned); // Do not implement + void operator=(const GlobalVariable &); // Do not implement + GlobalVariable(const GlobalVariable &); // Do not implement + + void setParent(Module *parent); + + bool isConstantGlobal : 1; // Is this a global constant? + bool isThreadLocalSymbol : 1; // Is this symbol "Thread Local"? + +public: + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 1); + } + /// GlobalVariable ctor - If a parent module is specified, the global is + /// automatically inserted into the end of the specified modules global list. + GlobalVariable(const Type *Ty, bool isConstant, LinkageTypes Linkage, + Constant *Initializer = 0, const std::string &Name = "", + Module *Parent = 0, bool ThreadLocal = false, + unsigned AddressSpace = 0); + /// GlobalVariable ctor - This creates a global and inserts it before the + /// specified other global. 
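
Using the first constructor above, a sketch of creating an initialized constant global. ConstantInt::get, Type::Int32Ty and the headers Constants.h and DerivedTypes.h come from elsewhere in this tree and are assumed; the helper name makeAnswer is made up.

    #include "llvm/GlobalVariable.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Module.h"
    using namespace llvm;

    GlobalVariable *makeAnswer(Module *M) {
      Constant *Init = ConstantInt::get(Type::Int32Ty, 42);
      // isConstant = true: the value is immutable during execution.
      return new GlobalVariable(Type::Int32Ty, /*isConstant=*/true,
                                GlobalValue::InternalLinkage, Init, "answer", M);
    }
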
+ GlobalVariable(const Type *Ty, bool isConstant, LinkageTypes Linkage, + Constant *Initializer, const std::string &Name, + GlobalVariable *InsertBefore, bool ThreadLocal = false, + unsigned AddressSpace = 0); + + ~GlobalVariable() { + NumOperands = 1; // FIXME: needed by operator delete + } + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// isDeclaration - Is this global variable lacking an initializer? If so, + /// the global variable is defined in some other translation unit, and is thus + /// only a declaration here. + virtual bool isDeclaration() const { return getNumOperands() == 0; } + + /// hasInitializer - Unless a global variable isExternal(), it has an + /// initializer. The initializer for the global variable/constant is held by + /// Initializer if an initializer is specified. + /// + inline bool hasInitializer() const { return !isDeclaration(); } + + /// hasDefinitiveInitializer - Whether the global variable has an initializer, + /// and this is the initializer that will be used in the final executable. + inline bool hasDefinitiveInitializer() const { + return hasInitializer() && + // The initializer of a global variable with weak linkage may change at + // link time. + !mayBeOverridden(); + } + + /// getInitializer - Return the initializer for this global variable. It is + /// illegal to call this method if the global is external, because we cannot + /// tell what the value is initialized to! + /// + inline /*const FIXME*/ Constant *getInitializer() const { + assert(hasInitializer() && "GV doesn't have initializer!"); + return static_cast<Constant*>(Op<0>().get()); + } + inline Constant *getInitializer() { + assert(hasInitializer() && "GV doesn't have initializer!"); + return static_cast<Constant*>(Op<0>().get()); + } + inline void setInitializer(Constant *CPV) { + if (CPV == 0) { + if (hasInitializer()) { + Op<0>().set(0); + NumOperands = 0; + } + } else { + if (!hasInitializer()) + NumOperands = 1; + Op<0>().set(CPV); + } + } + + /// If the value is a global constant, its value is immutable throughout the + /// runtime execution of the program. Assigning a value into the constant + /// leads to undefined behavior. + /// + bool isConstant() const { return isConstantGlobal; } + void setConstant(bool Val) { isConstantGlobal = Val; } + + /// If the value is "Thread Local", its value isn't shared by the threads. + bool isThreadLocal() const { return isThreadLocalSymbol; } + void setThreadLocal(bool Val) { isThreadLocalSymbol = Val; } + + /// copyAttributesFrom - copy all additional attributes (those not needed to + /// create a GlobalVariable) from the GlobalVariable Src to this one. + void copyAttributesFrom(const GlobalValue *Src); + + /// removeFromParent - This method unlinks 'this' from the containing module, + /// but does not delete it. + /// + virtual void removeFromParent(); + + /// eraseFromParent - This method unlinks 'this' from the containing module + /// and deletes it. + /// + virtual void eraseFromParent(); + + /// Override Constant's implementation of this method so we can + /// replace constant initializers. 
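
As the comments above stress, an initializer is only meaningful when one is present and will actually survive linking. A defensive access sketch; the helper name getTrustedInitializer is made up.

    #include "llvm/GlobalVariable.h"
    using namespace llvm;

    Constant *getTrustedInitializer(GlobalVariable *GV) {
      // hasDefinitiveInitializer() also rules out weak/common globals whose
      // initializer could be replaced at link time (mayBeOverridden()).
      if (GV->hasDefinitiveInitializer())
        return GV->getInitializer();
      return 0;   // declaration, or initializer not definitive
    }
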
+ virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const GlobalVariable *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == Value::GlobalVariableVal; + } +}; + +template <> +struct OperandTraits<GlobalVariable> : OptionalOperandTraits<> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value) + +} // End llvm namespace + +#endif diff --git a/include/llvm/InlineAsm.h b/include/llvm/InlineAsm.h new file mode 100644 index 0000000..84292cf --- /dev/null +++ b/include/llvm/InlineAsm.h @@ -0,0 +1,152 @@ +//===-- llvm/InlineAsm.h - Class to represent inline asm strings-*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class represents the inline asm strings, which are Value*'s that are +// used as the callee operand of call instructions. InlineAsm's are uniqued +// like constants, and created via InlineAsm::get(...). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INLINEASM_H +#define LLVM_INLINEASM_H + +#include "llvm/Value.h" +#include <vector> + +namespace llvm { + +class PointerType; +class FunctionType; +class Module; + +class InlineAsm : public Value { + InlineAsm(const InlineAsm &); // do not implement + void operator=(const InlineAsm&); // do not implement + + std::string AsmString, Constraints; + bool HasSideEffects; + + InlineAsm(const FunctionType *Ty, const std::string &AsmString, + const std::string &Constraints, bool hasSideEffects); + virtual ~InlineAsm(); +public: + + /// InlineAsm::get - Return the the specified uniqued inline asm string. + /// + static InlineAsm *get(const FunctionType *Ty, const std::string &AsmString, + const std::string &Constraints, bool hasSideEffects); + + bool hasSideEffects() const { return HasSideEffects; } + + /// getType - InlineAsm's are always pointers. + /// + const PointerType *getType() const { + return reinterpret_cast<const PointerType*>(Value::getType()); + } + + /// getFunctionType - InlineAsm's are always pointers to functions. + /// + const FunctionType *getFunctionType() const; + + const std::string &getAsmString() const { return AsmString; } + const std::string &getConstraintString() const { return Constraints; } + + /// Verify - This static method can be used by the parser to check to see if + /// the specified constraint string is legal for the type. This returns true + /// if legal, false if not. + /// + static bool Verify(const FunctionType *Ty, const std::string &Constraints); + + // Constraint String Parsing + enum ConstraintPrefix { + isInput, // 'x' + isOutput, // '=x' + isClobber // '~x' + }; + + struct ConstraintInfo { + /// Type - The basic type of the constraint: input/output/clobber + /// + ConstraintPrefix Type; + + /// isEarlyClobber - "&": output operand writes result before inputs are all + /// read. This is only ever set for an output operand. + bool isEarlyClobber; + + /// MatchingInput - If this is not -1, this is an output constraint where an + /// input constraint is required to match it (e.g. "0"). The value is the + /// constraint number that matches this one (for example, if this is + /// constraint #0 and constraint #4 has the value "0", this will be 4). 
+ signed char MatchingInput; + + /// hasMatchingInput - Return true if this is an output constraint that has + /// a matching input constraint. + bool hasMatchingInput() const { return MatchingInput != -1; } + + /// isCommutative - This is set to true for a constraint that is commutative + /// with the next operand. + bool isCommutative; + + /// isIndirect - True if this operand is an indirect operand. This means + /// that the address of the source or destination is present in the call + /// instruction, instead of it being returned or passed in explicitly. This + /// is represented with a '*' in the asm string. + bool isIndirect; + + /// Code - The constraint code, either the register name (in braces) or the + /// constraint letter/number. + std::vector<std::string> Codes; + + /// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the + /// fields in this structure. If the constraint string is not understood, + /// return true, otherwise return false. + bool Parse(const std::string &Str, + std::vector<InlineAsm::ConstraintInfo> &ConstraintsSoFar); + }; + + /// ParseConstraints - Split up the constraint string into the specific + /// constraints and their prefixes. If this returns an empty vector, and if + /// the constraint string itself isn't empty, there was an error parsing. + static std::vector<ConstraintInfo> + ParseConstraints(const std::string &ConstraintString); + + /// ParseConstraints - Parse the constraints of this inlineasm object, + /// returning them the same way that ParseConstraints(str) does. + std::vector<ConstraintInfo> + ParseConstraints() const { + return ParseConstraints(Constraints); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const InlineAsm *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() == Value::InlineAsmVal; + } + + /// getNumOperandRegisters - Extract the number of registers field from the + /// inline asm operand flag. + static unsigned getNumOperandRegisters(unsigned Flag) { + return (Flag & 0xffff) >> 3; + } + + /// isUseOperandTiedToDef - Return true if the flag of the inline asm + /// operand indicates it is an use operand that's matched to a def operand. + static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) { + if ((Flag & 0x80000000) == 0) + return false; + Idx = (Flag & ~0x80000000) >> 16; + return true; + } + + +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h new file mode 100644 index 0000000..3a774d4 --- /dev/null +++ b/include/llvm/InstrTypes.h @@ -0,0 +1,675 @@ +//===-- llvm/InstrTypes.h - Important Instruction subclasses ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various meta classes of instructions that exist in the VM +// representation. Specific concrete subclasses of these may be found in the +// i*.h files... 
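
A sketch of building an InlineAsm value and parsing its constraint string with the helpers declared in InlineAsm.h above. The function type FTy is assumed to be built elsewhere, the asm and constraint strings are just sample values, and the helper name makeBswap is made up.

    #include "llvm/InlineAsm.h"
    #include "llvm/DerivedTypes.h"
    #include <vector>
    using namespace llvm;

    InlineAsm *makeBswap(const FunctionType *FTy) {
      // One "=r" output tied by "0" to the single input; no side effects claimed.
      InlineAsm *IA = InlineAsm::get(FTy, "bswap $0", "=r,0", false);

      // An empty vector with a non-empty constraint string would mean a parse error.
      std::vector<InlineAsm::ConstraintInfo> Cs = IA->ParseConstraints();
      for (unsigned i = 0, e = Cs.size(); i != e; ++i) {
        if (Cs[i].Type == InlineAsm::isOutput && Cs[i].hasMatchingInput()) {
          // Cs[i].MatchingInput is the number of the input tied to this output.
        }
      }
      return IA;
    }
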
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INSTRUCTION_TYPES_H +#define LLVM_INSTRUCTION_TYPES_H + +#include "llvm/Instruction.h" +#include "llvm/OperandTraits.h" +#include "llvm/DerivedTypes.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +// TerminatorInst Class +//===----------------------------------------------------------------------===// + +/// TerminatorInst - Subclasses of this class are all able to terminate a basic +/// block. Thus, these are all the flow control type of operations. +/// +class TerminatorInst : public Instruction { +protected: + TerminatorInst(const Type *Ty, Instruction::TermOps iType, + Use *Ops, unsigned NumOps, + Instruction *InsertBefore = 0) + : Instruction(Ty, iType, Ops, NumOps, InsertBefore) {} + + TerminatorInst(const Type *Ty, Instruction::TermOps iType, + Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd) + : Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {} + + // Out of line virtual method, so the vtable, etc has a home. + ~TerminatorInst(); + + /// Virtual methods - Terminators should overload these and provide inline + /// overrides of non-V methods. + virtual BasicBlock *getSuccessorV(unsigned idx) const = 0; + virtual unsigned getNumSuccessorsV() const = 0; + virtual void setSuccessorV(unsigned idx, BasicBlock *B) = 0; +public: + + virtual Instruction *clone() const = 0; + + /// getNumSuccessors - Return the number of successors that this terminator + /// has. + unsigned getNumSuccessors() const { + return getNumSuccessorsV(); + } + + /// getSuccessor - Return the specified successor. + /// + BasicBlock *getSuccessor(unsigned idx) const { + return getSuccessorV(idx); + } + + /// setSuccessor - Update the specified successor to point at the provided + /// block. + void setSuccessor(unsigned idx, BasicBlock *B) { + setSuccessorV(idx, B); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const TerminatorInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->isTerminator(); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + + +//===----------------------------------------------------------------------===// +// UnaryInstruction Class +//===----------------------------------------------------------------------===// + +class UnaryInstruction : public Instruction { + void *operator new(size_t, unsigned); // Do not implement + UnaryInstruction(const UnaryInstruction&); // Do not implement + +protected: + UnaryInstruction(const Type *Ty, unsigned iType, Value *V, + Instruction *IB = 0) + : Instruction(Ty, iType, &Op<0>(), 1, IB) { + Op<0>() = V; + } + UnaryInstruction(const Type *Ty, unsigned iType, Value *V, BasicBlock *IAE) + : Instruction(Ty, iType, &Op<0>(), 1, IAE) { + Op<0>() = V; + } +public: + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 1); + } + + // Out of line virtual method, so the vtable, etc has a home. + ~UnaryInstruction(); + + /// Transparently provide more efficient getOperand methods. 
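
The TerminatorInst interface above gives a uniform way to walk a block's successors. A sketch, assuming BasicBlock::getTerminator() from BasicBlock.h; the helper name visitSuccessors is made up.

    #include "llvm/InstrTypes.h"
    #include "llvm/BasicBlock.h"
    using namespace llvm;

    // Visit every successor of BB through the terminator's uniform interface.
    void visitSuccessors(BasicBlock *BB) {
      TerminatorInst *TI = BB->getTerminator();   // 0 only for a malformed block
      if (!TI) return;
      for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
        BasicBlock *Succ = TI->getSuccessor(i);
        (void)Succ;   // e.g. push onto a worklist for a CFG traversal
      }
    }
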
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const UnaryInstruction *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Malloc || + I->getOpcode() == Instruction::Alloca || + I->getOpcode() == Instruction::Free || + I->getOpcode() == Instruction::Load || + I->getOpcode() == Instruction::VAArg || + I->getOpcode() == Instruction::ExtractValue || + (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<UnaryInstruction> : FixedNumOperandTraits<1> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value) + +//===----------------------------------------------------------------------===// +// BinaryOperator Class +//===----------------------------------------------------------------------===// + +class BinaryOperator : public Instruction { + void *operator new(size_t, unsigned); // Do not implement +protected: + void init(BinaryOps iType); + BinaryOperator(BinaryOps iType, Value *S1, Value *S2, const Type *Ty, + const std::string &Name, Instruction *InsertBefore); + BinaryOperator(BinaryOps iType, Value *S1, Value *S2, const Type *Ty, + const std::string &Name, BasicBlock *InsertAtEnd); +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// Create() - Construct a binary instruction, given the opcode and the two + /// operands. Optionally (if InstBefore is specified) insert the instruction + /// into a BasicBlock right before the specified instruction. The specified + /// Instruction is allowed to be a dereferenced end iterator. + /// + static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2, + const std::string &Name = "", + Instruction *InsertBefore = 0); + + /// Create() - Construct a binary instruction, given the opcode and the two + /// operands. Also automatically insert this instruction to the end of the + /// BasicBlock specified. + /// + static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2, + const std::string &Name, + BasicBlock *InsertAtEnd); + + /// Create* - These methods just forward to Create, and are useful when you + /// statically know what type of instruction you're going to create. These + /// helpers just save some typing. 
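
A sketch of the generic BinaryOperator::Create factory described above; the per-opcode Create##OPC helpers and CreateNeg defined just below forward to the same machinery. The helper name emitNegatedSum is made up.

    #include "llvm/InstrTypes.h"
    using namespace llvm;

    // Build X + Y and then -(X + Y) in front of an existing instruction.
    Value *emitNegatedSum(Value *X, Value *Y, Instruction *InsertBefore) {
      BinaryOperator *Sum =
          BinaryOperator::Create(Instruction::Add, X, Y, "sum", InsertBefore);
      return BinaryOperator::CreateNeg(Sum, "negsum", InsertBefore);
    }
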
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \ + static BinaryOperator *Create##OPC(Value *V1, Value *V2, \ + const std::string &Name = "") {\ + return Create(Instruction::OPC, V1, V2, Name);\ + } +#include "llvm/Instruction.def" +#define HANDLE_BINARY_INST(N, OPC, CLASS) \ + static BinaryOperator *Create##OPC(Value *V1, Value *V2, \ + const std::string &Name, BasicBlock *BB) {\ + return Create(Instruction::OPC, V1, V2, Name, BB);\ + } +#include "llvm/Instruction.def" +#define HANDLE_BINARY_INST(N, OPC, CLASS) \ + static BinaryOperator *Create##OPC(Value *V1, Value *V2, \ + const std::string &Name, Instruction *I) {\ + return Create(Instruction::OPC, V1, V2, Name, I);\ + } +#include "llvm/Instruction.def" + + + /// Helper functions to construct and inspect unary operations (NEG and NOT) + /// via binary operators SUB and XOR: + /// + /// CreateNeg, CreateNot - Create the NEG and NOT + /// instructions out of SUB and XOR instructions. + /// + static BinaryOperator *CreateNeg(Value *Op, const std::string &Name = "", + Instruction *InsertBefore = 0); + static BinaryOperator *CreateNeg(Value *Op, const std::string &Name, + BasicBlock *InsertAtEnd); + static BinaryOperator *CreateNot(Value *Op, const std::string &Name = "", + Instruction *InsertBefore = 0); + static BinaryOperator *CreateNot(Value *Op, const std::string &Name, + BasicBlock *InsertAtEnd); + + /// isNeg, isNot - Check if the given Value is a NEG or NOT instruction. + /// + static bool isNeg(const Value *V); + static bool isNot(const Value *V); + + /// getNegArgument, getNotArgument - Helper functions to extract the + /// unary argument of a NEG or NOT operation implemented via Sub or Xor. + /// + static const Value *getNegArgument(const Value *BinOp); + static Value *getNegArgument( Value *BinOp); + static const Value *getNotArgument(const Value *BinOp); + static Value *getNotArgument( Value *BinOp); + + BinaryOps getOpcode() const { + return static_cast<BinaryOps>(Instruction::getOpcode()); + } + + virtual BinaryOperator *clone() const; + + /// swapOperands - Exchange the two operands to this instruction. + /// This instruction is safe to use on any binary instruction and + /// does not modify the semantics of the instruction. If the instruction + /// cannot be reversed (ie, it's a Div), then return true. + /// + bool swapOperands(); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const BinaryOperator *) { return true; } + static inline bool classof(const Instruction *I) { + return I->isBinaryOp(); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<BinaryOperator> : FixedNumOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value) + +//===----------------------------------------------------------------------===// +// CastInst Class +//===----------------------------------------------------------------------===// + +/// CastInst - This is the base class for all instructions that perform data +/// casts. It is simply provided so that instruction category testing +/// can be performed with code like: +/// +/// if (isa<CastInst>(Instr)) { ... } +/// @brief Base class of casting instructions. 
+class CastInst : public UnaryInstruction { + /// @brief Copy constructor + CastInst(const CastInst &CI) + : UnaryInstruction(CI.getType(), CI.getOpcode(), CI.getOperand(0)) { + } + /// @brief Do not allow default construction + CastInst(); +protected: + /// @brief Constructor with insert-before-instruction semantics for subclasses + CastInst(const Type *Ty, unsigned iType, Value *S, + const std::string &NameStr = "", Instruction *InsertBefore = 0) + : UnaryInstruction(Ty, iType, S, InsertBefore) { + setName(NameStr); + } + /// @brief Constructor with insert-at-end-of-block semantics for subclasses + CastInst(const Type *Ty, unsigned iType, Value *S, + const std::string &NameStr, BasicBlock *InsertAtEnd) + : UnaryInstruction(Ty, iType, S, InsertAtEnd) { + setName(NameStr); + } +public: + /// Provides a way to construct any of the CastInst subclasses using an + /// opcode instead of the subclass's constructor. The opcode must be in the + /// CastOps category (Instruction::isCast(opcode) returns true). This + /// constructor has insert-before-instruction semantics to automatically + /// insert the new CastInst before InsertBefore (if it is non-null). + /// @brief Construct any of the CastInst subclasses + static CastInst *Create( + Instruction::CastOps, ///< The opcode of the cast instruction + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which cast should be made + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + /// Provides a way to construct any of the CastInst subclasses using an + /// opcode instead of the subclass's constructor. The opcode must be in the + /// CastOps category. This constructor has insert-at-end-of-block semantics + /// to automatically insert the new CastInst at the end of InsertAtEnd (if + /// its non-null). 
+ /// @brief Construct any of the CastInst subclasses + static CastInst *Create( + Instruction::CastOps, ///< The opcode for the cast instruction + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which operand is casted + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Create a ZExt or BitCast cast instruction + static CastInst *CreateZExtOrBitCast( + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which cast should be made + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + + /// @brief Create a ZExt or BitCast cast instruction + static CastInst *CreateZExtOrBitCast( + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which operand is casted + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Create a SExt or BitCast cast instruction + static CastInst *CreateSExtOrBitCast( + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which cast should be made + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + + /// @brief Create a SExt or BitCast cast instruction + static CastInst *CreateSExtOrBitCast( + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which operand is casted + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Create a BitCast or a PtrToInt cast instruction + static CastInst *CreatePointerCast( + Value *S, ///< The pointer value to be casted (operand 0) + const Type *Ty, ///< The type to which operand is casted + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Create a BitCast or a PtrToInt cast instruction + static CastInst *CreatePointerCast( + Value *S, ///< The pointer value to be casted (operand 0) + const Type *Ty, ///< The type to which cast should be made + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + + /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts. + static CastInst *CreateIntegerCast( + Value *S, ///< The pointer value to be casted (operand 0) + const Type *Ty, ///< The type to which cast should be made + bool isSigned, ///< Whether to regard S as signed or not + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + + /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts. 
+ static CastInst *CreateIntegerCast( + Value *S, ///< The integer value to be casted (operand 0) + const Type *Ty, ///< The integer type to which operand is casted + bool isSigned, ///< Whether to regard S as signed or not + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts + static CastInst *CreateFPCast( + Value *S, ///< The floating point value to be casted + const Type *Ty, ///< The floating point type to cast to + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + + /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts + static CastInst *CreateFPCast( + Value *S, ///< The floating point value to be casted + const Type *Ty, ///< The floating point type to cast to + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Create a Trunc or BitCast cast instruction + static CastInst *CreateTruncOrBitCast( + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which cast should be made + const std::string &Name = "", ///< Name for the instruction + Instruction *InsertBefore = 0 ///< Place to insert the instruction + ); + + /// @brief Create a Trunc or BitCast cast instruction + static CastInst *CreateTruncOrBitCast( + Value *S, ///< The value to be casted (operand 0) + const Type *Ty, ///< The type to which operand is casted + const std::string &Name, ///< The name for the instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Check whether it is valid to call getCastOpcode for these types. + static bool isCastable( + const Type *SrcTy, ///< The Type from which the value should be cast. + const Type *DestTy ///< The Type to which the value should be cast. + ); + + /// Returns the opcode necessary to cast Val into Ty using usual casting + /// rules. + /// @brief Infer the opcode for cast operand and type + static Instruction::CastOps getCastOpcode( + const Value *Val, ///< The value to cast + bool SrcIsSigned, ///< Whether to treat the source as signed + const Type *Ty, ///< The Type to which the value should be casted + bool DstIsSigned ///< Whether to treate the dest. as signed + ); + + /// There are several places where we need to know if a cast instruction + /// only deals with integer source and destination types. To simplify that + /// logic, this method is provided. + /// @returns true iff the cast has only integral typed operand and dest type. + /// @brief Determine if this is an integer-only cast. + bool isIntegerCast() const; + + /// A lossless cast is one that does not alter the basic value. It implies + /// a no-op cast but is more stringent, preventing things like int->float, + /// long->double, int->ptr, or vector->anything. + /// @returns true iff the cast is lossless. + /// @brief Determine if this is a lossless cast. + bool isLosslessCast() const; + + /// A no-op cast is one that can be effected without changing any bits. + /// It implies that the source and destination types are the same size. The + /// IntPtrTy argument is used to make accurate determinations for casts + /// involving Integer and Pointer types. They are no-op casts if the integer + /// is the same size as the pointer. However, pointer size varies with + /// platform. 
Generally, the result of TargetData::getIntPtrType() should be + /// passed in. If that's not available, use Type::Int64Ty, which will make + /// the isNoopCast call conservative. + /// @brief Determine if this cast is a no-op cast. + bool isNoopCast( + const Type *IntPtrTy ///< Integer type corresponding to pointer + ) const; + + /// Determine how a pair of casts can be eliminated, if they can be at all. + /// This is a helper function for both CastInst and ConstantExpr. + /// @returns 0 if the CastInst pair can't be eliminated + /// @returns Instruction::CastOps value for a cast that can replace + /// the pair, casting SrcTy to DstTy. + /// @brief Determine if a cast pair is eliminable + static unsigned isEliminableCastPair( + Instruction::CastOps firstOpcode, ///< Opcode of first cast + Instruction::CastOps secondOpcode, ///< Opcode of second cast + const Type *SrcTy, ///< SrcTy of 1st cast + const Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast + const Type *DstTy, ///< DstTy of 2nd cast + const Type *IntPtrTy ///< Integer type corresponding to Ptr types + ); + + /// @brief Return the opcode of this CastInst + Instruction::CastOps getOpcode() const { + return Instruction::CastOps(Instruction::getOpcode()); + } + + /// @brief Return the source type, as a convenience + const Type* getSrcTy() const { return getOperand(0)->getType(); } + /// @brief Return the destination type, as a convenience + const Type* getDestTy() const { return getType(); } + + /// This method can be used to determine if a cast from S to DstTy using + /// Opcode op is valid or not. + /// @returns true iff the proposed cast is valid. + /// @brief Determine if a cast is valid without creating one. + static bool castIsValid(Instruction::CastOps op, Value *S, const Type *DstTy); + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const CastInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->isCast(); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// CmpInst Class +//===----------------------------------------------------------------------===// + +/// This class is the base class for the comparison instructions. +/// @brief Abstract base class of comparison instructions. +// FIXME: why not derive from BinaryOperator? +class CmpInst: public Instruction { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + CmpInst(); // do not implement +protected: + CmpInst(const Type *ty, Instruction::OtherOps op, unsigned short pred, + Value *LHS, Value *RHS, const std::string &Name = "", + Instruction *InsertBefore = 0); + + CmpInst(const Type *ty, Instruction::OtherOps op, unsigned short pred, + Value *LHS, Value *RHS, const std::string &Name, + BasicBlock *InsertAtEnd); + +public: + /// This enumeration lists the possible predicates for CmpInst subclasses. + /// Values in the range 0-31 are reserved for FCmpInst, while values in the + /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the + /// predicate values are not overlapping between the classes. 
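// ---[ usage sketch: inferring a cast opcode (illustrative only) ]-----------
// A hypothetical helper combining the isCastable(), getCastOpcode() and
// Create() members above. `V`, `DestTy` and `InsertPt` are caller-provided
// placeholders, and Create() is used here in its insert-before form, assumed
// to mirror the insert-at-end overload shown earlier.
llvm::Value *castToExample(llvm::Value *V, const llvm::Type *DestTy,
                           llvm::Instruction *InsertPt) {
  using namespace llvm;
  if (V->getType() == DestTy)
    return V;                              // already the right type
  if (!CastInst::isCastable(V->getType(), DestTy))
    return 0;                              // no single cast can do it
  Instruction::CastOps Op =
      CastInst::getCastOpcode(V, /*SrcIsSigned=*/false,
                              DestTy, /*DstIsSigned=*/false);
  // castIsValid(Op, V, DestTy) could be asserted here as a sanity check.
  return CastInst::Create(Op, V, DestTy, "cast", InsertPt);
}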
+ enum Predicate { + // Opcode U L G E Intuitive operation + FCMP_FALSE = 0, /// 0 0 0 0 Always false (always folded) + FCMP_OEQ = 1, /// 0 0 0 1 True if ordered and equal + FCMP_OGT = 2, /// 0 0 1 0 True if ordered and greater than + FCMP_OGE = 3, /// 0 0 1 1 True if ordered and greater than or equal + FCMP_OLT = 4, /// 0 1 0 0 True if ordered and less than + FCMP_OLE = 5, /// 0 1 0 1 True if ordered and less than or equal + FCMP_ONE = 6, /// 0 1 1 0 True if ordered and operands are unequal + FCMP_ORD = 7, /// 0 1 1 1 True if ordered (no nans) + FCMP_UNO = 8, /// 1 0 0 0 True if unordered: isnan(X) | isnan(Y) + FCMP_UEQ = 9, /// 1 0 0 1 True if unordered or equal + FCMP_UGT = 10, /// 1 0 1 0 True if unordered or greater than + FCMP_UGE = 11, /// 1 0 1 1 True if unordered, greater than, or equal + FCMP_ULT = 12, /// 1 1 0 0 True if unordered or less than + FCMP_ULE = 13, /// 1 1 0 1 True if unordered, less than, or equal + FCMP_UNE = 14, /// 1 1 1 0 True if unordered or not equal + FCMP_TRUE = 15, /// 1 1 1 1 Always true (always folded) + FIRST_FCMP_PREDICATE = FCMP_FALSE, + LAST_FCMP_PREDICATE = FCMP_TRUE, + BAD_FCMP_PREDICATE = FCMP_TRUE + 1, + ICMP_EQ = 32, /// equal + ICMP_NE = 33, /// not equal + ICMP_UGT = 34, /// unsigned greater than + ICMP_UGE = 35, /// unsigned greater or equal + ICMP_ULT = 36, /// unsigned less than + ICMP_ULE = 37, /// unsigned less or equal + ICMP_SGT = 38, /// signed greater than + ICMP_SGE = 39, /// signed greater or equal + ICMP_SLT = 40, /// signed less than + ICMP_SLE = 41, /// signed less or equal + FIRST_ICMP_PREDICATE = ICMP_EQ, + LAST_ICMP_PREDICATE = ICMP_SLE, + BAD_ICMP_PREDICATE = ICMP_SLE + 1 + }; + + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + /// Construct a compare instruction, given the opcode, the predicate and + /// the two operands. Optionally (if InstBefore is specified) insert the + /// instruction into a BasicBlock right before the specified instruction. + /// The specified Instruction is allowed to be a dereferenced end iterator. + /// @brief Create a CmpInst + static CmpInst *Create(OtherOps Op, unsigned short predicate, Value *S1, + Value *S2, const std::string &Name = "", + Instruction *InsertBefore = 0); + + /// Construct a compare instruction, given the opcode, the predicate and the + /// two operands. Also automatically insert this instruction to the end of + /// the BasicBlock specified. + /// @brief Create a CmpInst + static CmpInst *Create(OtherOps Op, unsigned short predicate, Value *S1, + Value *S2, const std::string &Name, + BasicBlock *InsertAtEnd); + + /// @brief Get the opcode casted to the right type + OtherOps getOpcode() const { + return static_cast<OtherOps>(Instruction::getOpcode()); + } + + /// @brief Return the predicate for this instruction. + Predicate getPredicate() const { return Predicate(SubclassData); } + + /// @brief Set the predicate for this instruction to the specified value. + void setPredicate(Predicate P) { SubclassData = P; } + + /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE, + /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc. + /// @returns the inverse predicate for the instruction's current predicate. + /// @brief Return the inverse of the instruction's predicate. + Predicate getInversePredicate() const { + return getInversePredicate(getPredicate()); + } + + /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE, + /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc. + /// @returns the inverse predicate for predicate provided in \p pred. 
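// ---[ usage sketch: CmpInst predicates (illustrative only) ]----------------
// A hypothetical helper built on the Create() factory and the predicate
// queries above. `A`, `B` and `InsertPt` are caller-provided placeholders;
// both operands are assumed to be integers of the same type.
void compareExample(llvm::Value *A, llvm::Value *B,
                    llvm::Instruction *InsertPt) {
  using namespace llvm;
  CmpInst *IsLess = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT,
                                    A, B, "is.less", InsertPt);
  // ICMP_SLT inverts to ICMP_SGE: the logical negation of "signed less than".
  CmpInst::Predicate NotLess =
      CmpInst::getInversePredicate(IsLess->getPredicate());
  CmpInst::Create(Instruction::ICmp, NotLess, A, B, "is.not.less", InsertPt);
}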
+ /// @brief Return the inverse of a given predicate + static Predicate getInversePredicate(Predicate pred); + + /// For example, EQ->EQ, SLE->SGE, ULT->UGT, + /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc. + /// @returns the predicate that would be the result of exchanging the two + /// operands of the CmpInst instruction without changing the result + /// produced. + /// @brief Return the predicate as if the operands were swapped + Predicate getSwappedPredicate() const { + return getSwappedPredicate(getPredicate()); + } + + /// This is a static version that you can use without an instruction + /// available. + /// @brief Return the predicate as if the operands were swapped. + static Predicate getSwappedPredicate(Predicate pred); + + /// @brief Provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// This is just a convenience that dispatches to the subclasses. + /// @brief Swap the operands and adjust predicate accordingly to retain + /// the same comparison. + void swapOperands(); + + /// This is just a convenience that dispatches to the subclasses. + /// @brief Determine if this CmpInst is commutative. + bool isCommutative(); + + /// This is just a convenience that dispatches to the subclasses. + /// @brief Determine if this is an equals/not equals predicate. + bool isEquality(); + + /// @returns true if the predicate is unsigned, false otherwise. + /// @brief Determine if the predicate is an unsigned operation. + static bool isUnsigned(unsigned short predicate); + + /// @returns true if the predicate is signed, false otherwise. + /// @brief Determine if the predicate is an signed operation. + static bool isSigned(unsigned short predicate); + + /// @brief Determine if the predicate is an ordered operation. + static bool isOrdered(unsigned short predicate); + + /// @brief Determine if the predicate is an unordered operation. + static bool isUnordered(unsigned short predicate); + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const CmpInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::ICmp || + I->getOpcode() == Instruction::FCmp || + I->getOpcode() == Instruction::VICmp || + I->getOpcode() == Instruction::VFCmp; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } + /// @brief Create a result type for fcmp/icmp (but not vicmp/vfcmp) + static const Type* makeCmpResultType(const Type* opnd_type) { + if (const VectorType* vt = dyn_cast<const VectorType>(opnd_type)) { + return VectorType::get(Type::Int1Ty, vt->getNumElements()); + } + return Type::Int1Ty; + } +}; + + +// FIXME: these are redundant if CmpInst < BinaryOperator +template <> +struct OperandTraits<CmpInst> : FixedNumOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value) + +} // End llvm namespace + +#endif diff --git a/include/llvm/Instruction.def b/include/llvm/Instruction.def new file mode 100644 index 0000000..66bf2ce --- /dev/null +++ b/include/llvm/Instruction.def @@ -0,0 +1,196 @@ +//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains descriptions of the various LLVM instructions. 
This is +// used as a central place for enumerating the different instructions and +// should eventually be the place to put comments about the instructions. +// +//===----------------------------------------------------------------------===// + +// NOTE: NO INCLUDE GUARD DESIRED! + +// Provide definitions of macros so that users of this file do not have to +// define everything to use it... +// +#ifndef FIRST_TERM_INST +#define FIRST_TERM_INST(num) +#endif +#ifndef HANDLE_TERM_INST +#ifndef HANDLE_INST +#define HANDLE_TERM_INST(num, opcode, Class) +#else +#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class) +#endif +#endif +#ifndef LAST_TERM_INST +#define LAST_TERM_INST(num) +#endif + +#ifndef FIRST_BINARY_INST +#define FIRST_BINARY_INST(num) +#endif +#ifndef HANDLE_BINARY_INST +#ifndef HANDLE_INST +#define HANDLE_BINARY_INST(num, opcode, instclass) +#else +#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class) +#endif +#endif +#ifndef LAST_BINARY_INST +#define LAST_BINARY_INST(num) +#endif + +#ifndef FIRST_MEMORY_INST +#define FIRST_MEMORY_INST(num) +#endif +#ifndef HANDLE_MEMORY_INST +#ifndef HANDLE_INST +#define HANDLE_MEMORY_INST(num, opcode, Class) +#else +#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class) +#endif +#endif +#ifndef LAST_MEMORY_INST +#define LAST_MEMORY_INST(num) +#endif + +#ifndef FIRST_CAST_INST +#define FIRST_CAST_INST(num) +#endif +#ifndef HANDLE_CAST_INST +#ifndef HANDLE_INST +#define HANDLE_CAST_INST(num, opcode, Class) +#else +#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class) +#endif +#endif +#ifndef LAST_CAST_INST +#define LAST_CAST_INST(num) +#endif + +#ifndef FIRST_OTHER_INST +#define FIRST_OTHER_INST(num) +#endif +#ifndef HANDLE_OTHER_INST +#ifndef HANDLE_INST +#define HANDLE_OTHER_INST(num, opcode, Class) +#else +#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class) +#endif +#endif +#ifndef LAST_OTHER_INST +#define LAST_OTHER_INST(num) +#endif + + +// Terminator Instructions - These instructions are used to terminate a basic +// block of the program. Every basic block must end with one of these +// instructions for it to be a well formed basic block. +// + FIRST_TERM_INST ( 1) +HANDLE_TERM_INST ( 1, Ret , ReturnInst) +HANDLE_TERM_INST ( 2, Br , BranchInst) +HANDLE_TERM_INST ( 3, Switch , SwitchInst) +HANDLE_TERM_INST ( 4, Invoke , InvokeInst) +HANDLE_TERM_INST ( 5, Unwind , UnwindInst) +HANDLE_TERM_INST ( 6, Unreachable, UnreachableInst) + LAST_TERM_INST ( 6) + +// Standard binary operators... + FIRST_BINARY_INST( 7) +HANDLE_BINARY_INST( 7, Add , BinaryOperator) +HANDLE_BINARY_INST( 8, Sub , BinaryOperator) +HANDLE_BINARY_INST( 9, Mul , BinaryOperator) +HANDLE_BINARY_INST(10, UDiv , BinaryOperator) +HANDLE_BINARY_INST(11, SDiv , BinaryOperator) +HANDLE_BINARY_INST(12, FDiv , BinaryOperator) +HANDLE_BINARY_INST(13, URem , BinaryOperator) +HANDLE_BINARY_INST(14, SRem , BinaryOperator) +HANDLE_BINARY_INST(15, FRem , BinaryOperator) + +// Logical operators (integer operands) +HANDLE_BINARY_INST(16, Shl , BinaryOperator) // Shift left (logical) +HANDLE_BINARY_INST(17, LShr , BinaryOperator) // Shift right (logical) +HANDLE_BINARY_INST(18, AShr , BinaryOperator) // Shift right (arithmetic) +HANDLE_BINARY_INST(19, And , BinaryOperator) +HANDLE_BINARY_INST(20, Or , BinaryOperator) +HANDLE_BINARY_INST(21, Xor , BinaryOperator) + LAST_BINARY_INST(21) + +// Memory operators... 
+ FIRST_MEMORY_INST(22) +HANDLE_MEMORY_INST(22, Malloc, MallocInst) // Heap management instructions +HANDLE_MEMORY_INST(23, Free , FreeInst ) +HANDLE_MEMORY_INST(24, Alloca, AllocaInst) // Stack management +HANDLE_MEMORY_INST(25, Load , LoadInst ) // Memory manipulation instrs +HANDLE_MEMORY_INST(26, Store , StoreInst ) +HANDLE_MEMORY_INST(27, GetElementPtr, GetElementPtrInst) + LAST_MEMORY_INST(27) + +// Cast operators ... +// NOTE: The order matters here because CastInst::isEliminableCastPair +// NOTE: (see Instructions.cpp) encodes a table based on this ordering. + FIRST_CAST_INST(28) +HANDLE_CAST_INST(28, Trunc , TruncInst ) // Truncate integers +HANDLE_CAST_INST(29, ZExt , ZExtInst ) // Zero extend integers +HANDLE_CAST_INST(30, SExt , SExtInst ) // Sign extend integers +HANDLE_CAST_INST(31, FPToUI , FPToUIInst ) // floating point -> UInt +HANDLE_CAST_INST(32, FPToSI , FPToSIInst ) // floating point -> SInt +HANDLE_CAST_INST(33, UIToFP , UIToFPInst ) // UInt -> floating point +HANDLE_CAST_INST(34, SIToFP , SIToFPInst ) // SInt -> floating point +HANDLE_CAST_INST(35, FPTrunc , FPTruncInst ) // Truncate floating point +HANDLE_CAST_INST(36, FPExt , FPExtInst ) // Extend floating point +HANDLE_CAST_INST(37, PtrToInt, PtrToIntInst) // Pointer -> Integer +HANDLE_CAST_INST(38, IntToPtr, IntToPtrInst) // Integer -> Pointer +HANDLE_CAST_INST(39, BitCast , BitCastInst ) // Type cast + LAST_CAST_INST(39) + +// Other operators... + FIRST_OTHER_INST(40) +HANDLE_OTHER_INST(40, ICmp , ICmpInst ) // Integer comparison instruction +HANDLE_OTHER_INST(41, FCmp , FCmpInst ) // Floating point comparison instr. +HANDLE_OTHER_INST(42, PHI , PHINode ) // PHI node instruction +HANDLE_OTHER_INST(43, Call , CallInst ) // Call a function +HANDLE_OTHER_INST(44, Select , SelectInst ) // select instruction +HANDLE_OTHER_INST(45, UserOp1, Instruction) // May be used internally in a pass +HANDLE_OTHER_INST(46, UserOp2, Instruction) // Internal to passes only +HANDLE_OTHER_INST(47, VAArg , VAArgInst ) // vaarg instruction +HANDLE_OTHER_INST(48, ExtractElement, ExtractElementInst)// extract from vector +HANDLE_OTHER_INST(49, InsertElement, InsertElementInst) // insert into vector +HANDLE_OTHER_INST(50, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. +HANDLE_OTHER_INST(51, ExtractValue, ExtractValueInst)// extract from aggregate +HANDLE_OTHER_INST(52, InsertValue, InsertValueInst) // insert into aggregate +HANDLE_OTHER_INST(53, VICmp , VICmpInst ) // Vec Int comparison instruction. +HANDLE_OTHER_INST(54, VFCmp , VFCmpInst ) // Vec FP point comparison instr. + + LAST_OTHER_INST(55) + +#undef FIRST_TERM_INST +#undef HANDLE_TERM_INST +#undef LAST_TERM_INST + +#undef FIRST_BINARY_INST +#undef HANDLE_BINARY_INST +#undef LAST_BINARY_INST + +#undef FIRST_MEMORY_INST +#undef HANDLE_MEMORY_INST +#undef LAST_MEMORY_INST + +#undef FIRST_CAST_INST +#undef HANDLE_CAST_INST +#undef LAST_CAST_INST + +#undef FIRST_OTHER_INST +#undef HANDLE_OTHER_INST +#undef LAST_OTHER_INST + +#ifdef HANDLE_INST +#undef HANDLE_INST +#endif diff --git a/include/llvm/Instruction.h b/include/llvm/Instruction.h new file mode 100644 index 0000000..7d946e8 --- /dev/null +++ b/include/llvm/Instruction.h @@ -0,0 +1,253 @@ +//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
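// ---[ usage sketch: consuming Instruction.def (illustrative only) ]---------
// The .def file above is designed to be included after defining one or more
// of its HANDLE_* macros; any macro left undefined expands to nothing, and
// all of them are #undef'd again at the end of the file. A typical client
// generates a switch over every opcode, e.g. this hypothetical helper:
const char *opcodeToStringExample(unsigned Opcode) {
  switch (Opcode) {
#define HANDLE_INST(N, OPCODE, CLASS) case N: return #OPCODE;
#include "llvm/Instruction.def"
  default:
    return "<unknown opcode>";
  }
}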
+// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the Instruction class, which is the +// base class for all of the LLVM instructions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INSTRUCTION_H +#define LLVM_INSTRUCTION_H + +#include "llvm/User.h" +#include "llvm/ADT/ilist_node.h" + +namespace llvm { + +template<typename ValueSubClass, typename ItemParentClass> + class SymbolTableListTraits; + +class Instruction : public User, public ilist_node<Instruction> { + void operator=(const Instruction &); // Do not implement + Instruction(const Instruction &); // Do not implement + + BasicBlock *Parent; + + friend class SymbolTableListTraits<Instruction, BasicBlock>; + void setParent(BasicBlock *P); +protected: + Instruction(const Type *Ty, unsigned iType, Use *Ops, unsigned NumOps, + Instruction *InsertBefore = 0); + Instruction(const Type *Ty, unsigned iType, Use *Ops, unsigned NumOps, + BasicBlock *InsertAtEnd); +public: + // Out of line virtual method, so the vtable, etc has a home. + ~Instruction(); + + /// clone() - Create a copy of 'this' instruction that is identical in all + /// ways except the following: + /// * The instruction has no parent + /// * The instruction has no name + /// + virtual Instruction *clone() const = 0; + + /// isIdenticalTo - Return true if the specified instruction is exactly + /// identical to the current one. This means that all operands match and any + /// extra information (e.g. load is volatile) agree. + bool isIdenticalTo(const Instruction *I) const; + + /// This function determines if the specified instruction executes the same + /// operation as the current one. This means that the opcodes, type, operand + /// types and any other factors affecting the operation must be the same. This + /// is similar to isIdenticalTo except the operands themselves don't have to + /// be identical. + /// @returns true if the specified instruction is the same operation as + /// the current one. + /// @brief Determine if one instruction is the same operation as another. + bool isSameOperationAs(const Instruction *I) const; + + /// isUsedOutsideOfBlock - Return true if there are any uses of this + /// instruction in blocks other than the specified block. Note that PHI nodes + /// are considered to evaluate their operands in the corresponding predecessor + /// block. + bool isUsedOutsideOfBlock(const BasicBlock *BB) const; + + + /// use_back - Specialize the methods defined in Value, as we know that an + /// instruction can only be used by other instructions. + Instruction *use_back() { return cast<Instruction>(*use_begin());} + const Instruction *use_back() const { return cast<Instruction>(*use_begin());} + + // Accessor methods... + // + inline const BasicBlock *getParent() const { return Parent; } + inline BasicBlock *getParent() { return Parent; } + + /// removeFromParent - This method unlinks 'this' from the containing basic + /// block, but does not delete it. + /// + void removeFromParent(); + + /// eraseFromParent - This method unlinks 'this' from the containing basic + /// block and deletes it. + /// + void eraseFromParent(); + + /// insertBefore - Insert an unlinked instructions into a basic block + /// immediately before the specified instruction. + void insertBefore(Instruction *InsertPos); + + /// insertAfter - Insert an unlinked instructions into a basic block + /// immediately after the specified instruction. 
+ void insertAfter(Instruction *InsertPos); + + /// moveBefore - Unlink this instruction from its current basic block and + /// insert it into the basic block that MovePos lives in, right before + /// MovePos. + void moveBefore(Instruction *MovePos); + + // --------------------------------------------------------------------------- + /// Subclass classification... getOpcode() returns a member of + /// one of the enums that is coming soon (down below)... + /// + unsigned getOpcode() const { return getValueID() - InstructionVal; } + const char *getOpcodeName() const { return getOpcodeName(getOpcode()); } + bool isTerminator() const { return isTerminator(getOpcode()); } + bool isBinaryOp() const { return isBinaryOp(getOpcode()); } + bool isShift() { return isShift(getOpcode()); } + bool isCast() const { return isCast(getOpcode()); } + + + + static const char* getOpcodeName(unsigned OpCode); + + static inline bool isTerminator(unsigned OpCode) { + return OpCode >= TermOpsBegin && OpCode < TermOpsEnd; + } + + static inline bool isBinaryOp(unsigned Opcode) { + return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd; + } + + /// @brief Determine if the Opcode is one of the shift instructions. + static inline bool isShift(unsigned Opcode) { + return Opcode >= Shl && Opcode <= AShr; + } + + /// isLogicalShift - Return true if this is a logical shift left or a logical + /// shift right. + inline bool isLogicalShift() const { + return getOpcode() == Shl || getOpcode() == LShr; + } + + /// isArithmeticShift - Return true if this is an arithmetic shift right. + inline bool isArithmeticShift() const { + return getOpcode() == AShr; + } + + /// @brief Determine if the OpCode is one of the CastInst instructions. + static inline bool isCast(unsigned OpCode) { + return OpCode >= CastOpsBegin && OpCode < CastOpsEnd; + } + + /// isAssociative - Return true if the instruction is associative: + /// + /// Associative operators satisfy: x op (y op z) === (x op y) op z + /// + /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative, when + /// not applied to floating point types. + /// + bool isAssociative() const { return isAssociative(getOpcode(), getType()); } + static bool isAssociative(unsigned op, const Type *Ty); + + /// isCommutative - Return true if the instruction is commutative: + /// + /// Commutative operators satisfy: (x op y) === (y op x) + /// + /// In LLVM, these are the associative operators, plus SetEQ and SetNE, when + /// applied to any type. + /// + bool isCommutative() const { return isCommutative(getOpcode()); } + static bool isCommutative(unsigned op); + + /// isTrapping - Return true if the instruction may trap. + /// + bool isTrapping() const { + return isTrapping(getOpcode()); + } + static bool isTrapping(unsigned op); + + /// mayWriteToMemory - Return true if this instruction may modify memory. + /// + bool mayWriteToMemory() const; + + /// mayReadFromMemory - Return true if this instruction may read memory. + /// + bool mayReadFromMemory() const; + + /// mayThrow - Return true if this instruction may throw an exception. + /// + bool mayThrow() const; + + /// mayHaveSideEffects - Return true if the instruction may have side effects. 
+ /// + bool mayHaveSideEffects() const { + return mayWriteToMemory() || mayThrow(); + } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const Instruction *) { return true; } + static inline bool classof(const Value *V) { + return V->getValueID() >= Value::InstructionVal; + } + + //---------------------------------------------------------------------- + // Exported enumerations... + // + enum TermOps { // These terminate basic blocks +#define FIRST_TERM_INST(N) TermOpsBegin = N, +#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N, +#define LAST_TERM_INST(N) TermOpsEnd = N+1 +#include "llvm/Instruction.def" + }; + + enum BinaryOps { +#define FIRST_BINARY_INST(N) BinaryOpsBegin = N, +#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N, +#define LAST_BINARY_INST(N) BinaryOpsEnd = N+1 +#include "llvm/Instruction.def" + }; + + enum MemoryOps { +#define FIRST_MEMORY_INST(N) MemoryOpsBegin = N, +#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N, +#define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1 +#include "llvm/Instruction.def" + }; + + enum CastOps { +#define FIRST_CAST_INST(N) CastOpsBegin = N, +#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N, +#define LAST_CAST_INST(N) CastOpsEnd = N+1 +#include "llvm/Instruction.def" + }; + + enum OtherOps { +#define FIRST_OTHER_INST(N) OtherOpsBegin = N, +#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N, +#define LAST_OTHER_INST(N) OtherOpsEnd = N+1 +#include "llvm/Instruction.def" + }; +}; + +// Instruction* is only 4-byte aligned. +template<> +class PointerLikeTypeTraits<Instruction*> { + typedef Instruction* PT; +public: + static inline void *getAsVoidPointer(PT P) { return P; } + static inline PT getFromVoidPointer(void *P) { + return static_cast<PT>(P); + } + enum { NumLowBitsAvailable = 2 }; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h new file mode 100644 index 0000000..59ae610 --- /dev/null +++ b/include/llvm/Instructions.h @@ -0,0 +1,3157 @@ +//===-- llvm/Instructions.h - Instruction subclass definitions --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes the class definitions of all of the subclasses of the +// Instruction class. This is meant to be an easy way to get access to all +// instruction subclasses. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INSTRUCTIONS_H +#define LLVM_INSTRUCTIONS_H + +#include "llvm/InstrTypes.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Attributes.h" +#include "llvm/BasicBlock.h" +#include "llvm/ADT/SmallVector.h" +#include <iterator> + +namespace llvm { + +class ConstantInt; +class ConstantRange; +class APInt; + +//===----------------------------------------------------------------------===// +// AllocationInst Class +//===----------------------------------------------------------------------===// + +/// AllocationInst - This class is the common base class of MallocInst and +/// AllocaInst. 
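// ---[ usage sketch: querying Instruction opcodes (illustrative only) ]------
// A hypothetical helper using the opcode queries from llvm/Instruction.h
// above. It assumes `BB` is an existing BasicBlock and relies only on the
// standard ilist iteration that BasicBlock already provides.
unsigned countSideEffectsExample(llvm::BasicBlock &BB) {
  using namespace llvm;
  unsigned N = 0;
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
    // Skip the terminator; count anything that may write memory or throw.
    if (!I->isTerminator() && I->mayHaveSideEffects())
      ++N;
  }
  return N;
}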
+/// +class AllocationInst : public UnaryInstruction { +protected: + AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy, unsigned Align, + const std::string &Name = "", Instruction *InsertBefore = 0); + AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy, unsigned Align, + const std::string &Name, BasicBlock *InsertAtEnd); +public: + // Out of line virtual method, so the vtable, etc. has a home. + virtual ~AllocationInst(); + + /// isArrayAllocation - Return true if there is an allocation size parameter + /// to the allocation instruction that is not 1. + /// + bool isArrayAllocation() const; + + /// getArraySize - Get the number of element allocated, for a simple + /// allocation of a single element, this will return a constant 1 value. + /// + const Value *getArraySize() const { return getOperand(0); } + Value *getArraySize() { return getOperand(0); } + + /// getType - Overload to return most specific pointer type + /// + const PointerType *getType() const { + return reinterpret_cast<const PointerType*>(Instruction::getType()); + } + + /// getAllocatedType - Return the type that is being allocated by the + /// instruction. + /// + const Type *getAllocatedType() const; + + /// getAlignment - Return the alignment of the memory that is being allocated + /// by the instruction. + /// + unsigned getAlignment() const { return (1u << SubclassData) >> 1; } + void setAlignment(unsigned Align); + + virtual Instruction *clone() const = 0; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const AllocationInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Alloca || + I->getOpcode() == Instruction::Malloc; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + + +//===----------------------------------------------------------------------===// +// MallocInst Class +//===----------------------------------------------------------------------===// + +/// MallocInst - an instruction to allocated memory on the heap +/// +class MallocInst : public AllocationInst { + MallocInst(const MallocInst &MI); +public: + explicit MallocInst(const Type *Ty, Value *ArraySize = 0, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) + : AllocationInst(Ty, ArraySize, Malloc, 0, NameStr, InsertBefore) {} + MallocInst(const Type *Ty, Value *ArraySize, const std::string &NameStr, + BasicBlock *InsertAtEnd) + : AllocationInst(Ty, ArraySize, Malloc, 0, NameStr, InsertAtEnd) {} + + MallocInst(const Type *Ty, const std::string &NameStr, + Instruction *InsertBefore = 0) + : AllocationInst(Ty, 0, Malloc, 0, NameStr, InsertBefore) {} + MallocInst(const Type *Ty, const std::string &NameStr, + BasicBlock *InsertAtEnd) + : AllocationInst(Ty, 0, Malloc, 0, NameStr, InsertAtEnd) {} + + MallocInst(const Type *Ty, Value *ArraySize, unsigned Align, + const std::string &NameStr, BasicBlock *InsertAtEnd) + : AllocationInst(Ty, ArraySize, Malloc, Align, NameStr, InsertAtEnd) {} + MallocInst(const Type *Ty, Value *ArraySize, unsigned Align, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) + : AllocationInst(Ty, ArraySize, Malloc, Align, NameStr, InsertBefore) {} + + virtual MallocInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MallocInst *) { return true; } + static inline bool classof(const Instruction *I) { + return 
(I->getOpcode() == Instruction::Malloc); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + + +//===----------------------------------------------------------------------===// +// AllocaInst Class +//===----------------------------------------------------------------------===// + +/// AllocaInst - an instruction to allocate memory on the stack +/// +class AllocaInst : public AllocationInst { + AllocaInst(const AllocaInst &); +public: + explicit AllocaInst(const Type *Ty, Value *ArraySize = 0, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) + : AllocationInst(Ty, ArraySize, Alloca, 0, NameStr, InsertBefore) {} + AllocaInst(const Type *Ty, Value *ArraySize, const std::string &NameStr, + BasicBlock *InsertAtEnd) + : AllocationInst(Ty, ArraySize, Alloca, 0, NameStr, InsertAtEnd) {} + + AllocaInst(const Type *Ty, const std::string &NameStr, + Instruction *InsertBefore = 0) + : AllocationInst(Ty, 0, Alloca, 0, NameStr, InsertBefore) {} + AllocaInst(const Type *Ty, const std::string &NameStr, + BasicBlock *InsertAtEnd) + : AllocationInst(Ty, 0, Alloca, 0, NameStr, InsertAtEnd) {} + + AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align, + const std::string &NameStr = "", Instruction *InsertBefore = 0) + : AllocationInst(Ty, ArraySize, Alloca, Align, NameStr, InsertBefore) {} + AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align, + const std::string &NameStr, BasicBlock *InsertAtEnd) + : AllocationInst(Ty, ArraySize, Alloca, Align, NameStr, InsertAtEnd) {} + + virtual AllocaInst *clone() const; + + /// isStaticAlloca - Return true if this alloca is in the entry block of the + /// function and is a constant size. If so, the code generator will fold it + /// into the prolog/epilog code, so it is basically free. + bool isStaticAlloca() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const AllocaInst *) { return true; } + static inline bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::Alloca); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + + +//===----------------------------------------------------------------------===// +// FreeInst Class +//===----------------------------------------------------------------------===// + +/// FreeInst - an instruction to deallocate memory +/// +class FreeInst : public UnaryInstruction { + void AssertOK(); +public: + explicit FreeInst(Value *Ptr, Instruction *InsertBefore = 0); + FreeInst(Value *Ptr, BasicBlock *InsertAfter); + + virtual FreeInst *clone() const; + + // Accessor methods for consistency with other memory operations + Value *getPointerOperand() { return getOperand(0); } + const Value *getPointerOperand() const { return getOperand(0); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FreeInst *) { return true; } + static inline bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::Free); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + + +//===----------------------------------------------------------------------===// +// LoadInst Class +//===----------------------------------------------------------------------===// + +/// LoadInst - an instruction for reading from memory. 
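// ---[ usage sketch: allocation instructions (illustrative only) ]-----------
// A hypothetical helper creating a stack slot, a heap block and the matching
// free, using the constructors above. `InsertPt` is a placeholder insertion
// point; Type::Int32Ty (pre-2.6 style, like Type::Int64Ty elsewhere in these
// headers) and ConstantInt::get() from llvm/Constants.h are assumed available.
void allocationExample(llvm::Instruction *InsertPt) {
  using namespace llvm;
  // One i32 stack slot, 4-byte aligned (a null ArraySize means one element).
  new AllocaInst(Type::Int32Ty, 0, 4u, "slot", InsertPt);
  // A heap allocation of 16 x i32, released again with FreeInst.
  Value *Count = ConstantInt::get(Type::Int32Ty, 16);
  MallocInst *Heap = new MallocInst(Type::Int32Ty, Count, "heap", InsertPt);
  new FreeInst(Heap, InsertPt);
}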
This uses the +/// SubclassData field in Value to store whether or not the load is volatile. +/// +class LoadInst : public UnaryInstruction { + + LoadInst(const LoadInst &LI) + : UnaryInstruction(LI.getType(), Load, LI.getOperand(0)) { + setVolatile(LI.isVolatile()); + setAlignment(LI.getAlignment()); + +#ifndef NDEBUG + AssertOK(); +#endif + } + void AssertOK(); +public: + LoadInst(Value *Ptr, const std::string &NameStr, Instruction *InsertBefore); + LoadInst(Value *Ptr, const std::string &NameStr, BasicBlock *InsertAtEnd); + LoadInst(Value *Ptr, const std::string &NameStr, bool isVolatile = false, + Instruction *InsertBefore = 0); + LoadInst(Value *Ptr, const std::string &NameStr, bool isVolatile, + unsigned Align, Instruction *InsertBefore = 0); + LoadInst(Value *Ptr, const std::string &NameStr, bool isVolatile, + BasicBlock *InsertAtEnd); + LoadInst(Value *Ptr, const std::string &NameStr, bool isVolatile, + unsigned Align, BasicBlock *InsertAtEnd); + + LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore); + LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd); + explicit LoadInst(Value *Ptr, const char *NameStr = 0, + bool isVolatile = false, Instruction *InsertBefore = 0); + LoadInst(Value *Ptr, const char *NameStr, bool isVolatile, + BasicBlock *InsertAtEnd); + + /// isVolatile - Return true if this is a load from a volatile memory + /// location. + /// + bool isVolatile() const { return SubclassData & 1; } + + /// setVolatile - Specify whether this is a volatile load or not. + /// + void setVolatile(bool V) { + SubclassData = (SubclassData & ~1) | (V ? 1 : 0); + } + + virtual LoadInst *clone() const; + + /// getAlignment - Return the alignment of the access that is being performed + /// + unsigned getAlignment() const { + return (1 << (SubclassData>>1)) >> 1; + } + + void setAlignment(unsigned Align); + + Value *getPointerOperand() { return getOperand(0); } + const Value *getPointerOperand() const { return getOperand(0); } + static unsigned getPointerOperandIndex() { return 0U; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const LoadInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Load; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + + +//===----------------------------------------------------------------------===// +// StoreInst Class +//===----------------------------------------------------------------------===// + +/// StoreInst - an instruction for storing to memory +/// +class StoreInst : public Instruction { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + + StoreInst(const StoreInst &SI) : Instruction(SI.getType(), Store, + &Op<0>(), 2) { + Op<0>() = SI.Op<0>(); + Op<1>() = SI.Op<1>(); + setVolatile(SI.isVolatile()); + setAlignment(SI.getAlignment()); + +#ifndef NDEBUG + AssertOK(); +#endif + } + void AssertOK(); +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); + StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); + StoreInst(Value *Val, Value *Ptr, bool isVolatile = false, + Instruction *InsertBefore = 0); + StoreInst(Value *Val, Value *Ptr, bool isVolatile, + unsigned Align, Instruction *InsertBefore = 0); + StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); + 
StoreInst(Value *Val, Value *Ptr, bool isVolatile, + unsigned Align, BasicBlock *InsertAtEnd); + + + /// isVolatile - Return true if this is a load from a volatile memory + /// location. + /// + bool isVolatile() const { return SubclassData & 1; } + + /// setVolatile - Specify whether this is a volatile load or not. + /// + void setVolatile(bool V) { + SubclassData = (SubclassData & ~1) | (V ? 1 : 0); + } + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// getAlignment - Return the alignment of the access that is being performed + /// + unsigned getAlignment() const { + return (1 << (SubclassData>>1)) >> 1; + } + + void setAlignment(unsigned Align); + + virtual StoreInst *clone() const; + + Value *getPointerOperand() { return getOperand(1); } + const Value *getPointerOperand() const { return getOperand(1); } + static unsigned getPointerOperandIndex() { return 1U; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const StoreInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Store; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<StoreInst> : FixedNumOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) + +//===----------------------------------------------------------------------===// +// GetElementPtrInst Class +//===----------------------------------------------------------------------===// + +// checkType - Simple wrapper function to give a better assertion failure +// message on bad indexes for a gep instruction. +// +static inline const Type *checkType(const Type *Ty) { + assert(Ty && "Invalid GetElementPtrInst indices for type!"); + return Ty; +} + +/// GetElementPtrInst - an instruction for type-safe pointer arithmetic to +/// access elements of arrays and structs +/// +class GetElementPtrInst : public Instruction { + GetElementPtrInst(const GetElementPtrInst &GEPI); + void init(Value *Ptr, Value* const *Idx, unsigned NumIdx, + const std::string &NameStr); + void init(Value *Ptr, Value *Idx, const std::string &NameStr); + + template<typename InputIterator> + void init(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, + // This argument ensures that we have an iterator we can + // do arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumIdx = static_cast<unsigned>(std::distance(IdxBegin, IdxEnd)); + + if (NumIdx > 0) { + // This requires that the iterator points to contiguous memory. + init(Ptr, &*IdxBegin, NumIdx, NameStr); // FIXME: for the general case + // we have to build an array here + } + else { + init(Ptr, 0, NumIdx, NameStr); + } + } + + /// getIndexedType - Returns the type of the element that would be loaded with + /// a load instruction with the specified parameters. + /// + /// Null is returned if the indices are invalid for the specified + /// pointer type. 
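// ---[ usage sketch: volatile, aligned load/store (illustrative only) ]------
// A hypothetical helper showing the constructors above. `Ptr` is assumed to
// be an i32*, `Val` an i32, and `InsertPt` the insertion point; all three are
// caller-provided placeholders.
void roundTripExample(llvm::Value *Ptr, llvm::Value *Val,
                      llvm::Instruction *InsertPt) {
  using namespace llvm;
  new StoreInst(Val, Ptr, /*isVolatile=*/true, /*Align=*/4, InsertPt);
  LoadInst *Reload = new LoadInst(Ptr, std::string("reload"),
                                  /*isVolatile=*/true, /*Align=*/4, InsertPt);
  // Both properties round-trip through the SubclassData encoding noted above.
  bool Volatile = Reload->isVolatile();
  unsigned Align = Reload->getAlignment();
  (void)Volatile; (void)Align;
}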
+ /// + template<typename InputIterator> + static const Type *getIndexedType(const Type *Ptr, + InputIterator IdxBegin, + InputIterator IdxEnd, + // This argument ensures that we + // have an iterator we can do + // arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumIdx = static_cast<unsigned>(std::distance(IdxBegin, IdxEnd)); + + if (NumIdx > 0) + // This requires that the iterator points to contiguous memory. + return getIndexedType(Ptr, &*IdxBegin, NumIdx); + else + return getIndexedType(Ptr, (Value *const*)0, NumIdx); + } + + /// Constructors - Create a getelementptr instruction with a base pointer an + /// list of indices. The first ctor can optionally insert before an existing + /// instruction, the second appends the new instruction to the specified + /// BasicBlock. + template<typename InputIterator> + inline GetElementPtrInst(Value *Ptr, InputIterator IdxBegin, + InputIterator IdxEnd, + unsigned Values, + const std::string &NameStr, + Instruction *InsertBefore); + template<typename InputIterator> + inline GetElementPtrInst(Value *Ptr, + InputIterator IdxBegin, InputIterator IdxEnd, + unsigned Values, + const std::string &NameStr, BasicBlock *InsertAtEnd); + + /// Constructors - These two constructors are convenience methods because one + /// and two index getelementptr instructions are so common. + GetElementPtrInst(Value *Ptr, Value *Idx, const std::string &NameStr = "", + Instruction *InsertBefore = 0); + GetElementPtrInst(Value *Ptr, Value *Idx, + const std::string &NameStr, BasicBlock *InsertAtEnd); +public: + template<typename InputIterator> + static GetElementPtrInst *Create(Value *Ptr, InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + typename std::iterator_traits<InputIterator>::difference_type Values = + 1 + std::distance(IdxBegin, IdxEnd); + return new(Values) + GetElementPtrInst(Ptr, IdxBegin, IdxEnd, Values, NameStr, InsertBefore); + } + template<typename InputIterator> + static GetElementPtrInst *Create(Value *Ptr, + InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + typename std::iterator_traits<InputIterator>::difference_type Values = + 1 + std::distance(IdxBegin, IdxEnd); + return new(Values) + GetElementPtrInst(Ptr, IdxBegin, IdxEnd, Values, NameStr, InsertAtEnd); + } + + /// Constructors - These two creators are convenience methods because one + /// index getelementptr instructions are so common. + static GetElementPtrInst *Create(Value *Ptr, Value *Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new(2) GetElementPtrInst(Ptr, Idx, NameStr, InsertBefore); + } + static GetElementPtrInst *Create(Value *Ptr, Value *Idx, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new(2) GetElementPtrInst(Ptr, Idx, NameStr, InsertAtEnd); + } + + virtual GetElementPtrInst *clone() const; + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + // getType - Overload to return most specific pointer type... + const PointerType *getType() const { + return reinterpret_cast<const PointerType*>(Instruction::getType()); + } + + /// getIndexedType - Returns the type of the element that would be loaded with + /// a load instruction with the specified parameters. + /// + /// Null is returned if the indices are invalid for the specified + /// pointer type. 
+ /// + template<typename InputIterator> + static const Type *getIndexedType(const Type *Ptr, + InputIterator IdxBegin, + InputIterator IdxEnd) { + return getIndexedType(Ptr, IdxBegin, IdxEnd, + typename std::iterator_traits<InputIterator>:: + iterator_category()); + } + + static const Type *getIndexedType(const Type *Ptr, + Value* const *Idx, unsigned NumIdx); + + static const Type *getIndexedType(const Type *Ptr, + uint64_t const *Idx, unsigned NumIdx); + + static const Type *getIndexedType(const Type *Ptr, Value *Idx); + + inline op_iterator idx_begin() { return op_begin()+1; } + inline const_op_iterator idx_begin() const { return op_begin()+1; } + inline op_iterator idx_end() { return op_end(); } + inline const_op_iterator idx_end() const { return op_end(); } + + Value *getPointerOperand() { + return getOperand(0); + } + const Value *getPointerOperand() const { + return getOperand(0); + } + static unsigned getPointerOperandIndex() { + return 0U; // get index for modifying correct operand + } + + /// getPointerOperandType - Method to return the pointer operand as a + /// PointerType. + const PointerType *getPointerOperandType() const { + return reinterpret_cast<const PointerType*>(getPointerOperand()->getType()); + } + + + unsigned getNumIndices() const { // Note: always non-negative + return getNumOperands() - 1; + } + + bool hasIndices() const { + return getNumOperands() > 1; + } + + /// hasAllZeroIndices - Return true if all of the indices of this GEP are + /// zeros. If so, the result pointer and the first operand have the same + /// value, just potentially different types. + bool hasAllZeroIndices() const; + + /// hasAllConstantIndices - Return true if all of the indices of this GEP are + /// constant integers. If so, the result pointer and the first operand have + /// a constant offset between them. 
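// ---[ usage sketch: GetElementPtrInst::Create (illustrative only) ]---------
// A hypothetical helper computing the address of element 1 of a [4 x i32]*.
// `ArrayPtr` and `InsertPt` are caller-provided placeholders; Type::Int32Ty
// and ConstantInt::get() (llvm/Constants.h) are assumed available. The index
// array satisfies the contiguous, random-access iterator requirement above.
void gepExample(llvm::Value *ArrayPtr, llvm::Instruction *InsertPt) {
  using namespace llvm;
  Value *Idx[2] = {
    ConstantInt::get(Type::Int32Ty, 0),   // step through the pointer itself
    ConstantInt::get(Type::Int32Ty, 1)    // element #1 of the array
  };
  GetElementPtrInst *GEP =
      GetElementPtrInst::Create(ArrayPtr, Idx, Idx + 2, "elt.addr", InsertPt);
  // With all-constant indices, the offset from ArrayPtr is itself constant.
  bool ConstantOffset = GEP->hasAllConstantIndices();
  (void)ConstantOffset;
}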
+ bool hasAllConstantIndices() const; + + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const GetElementPtrInst *) { return true; } + static inline bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::GetElementPtr); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<GetElementPtrInst> : VariadicOperandTraits<1> { +}; + +template<typename InputIterator> +GetElementPtrInst::GetElementPtrInst(Value *Ptr, + InputIterator IdxBegin, + InputIterator IdxEnd, + unsigned Values, + const std::string &NameStr, + Instruction *InsertBefore) + : Instruction(PointerType::get(checkType( + getIndexedType(Ptr->getType(), + IdxBegin, IdxEnd)), + cast<PointerType>(Ptr->getType()) + ->getAddressSpace()), + GetElementPtr, + OperandTraits<GetElementPtrInst>::op_end(this) - Values, + Values, InsertBefore) { + init(Ptr, IdxBegin, IdxEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} +template<typename InputIterator> +GetElementPtrInst::GetElementPtrInst(Value *Ptr, + InputIterator IdxBegin, + InputIterator IdxEnd, + unsigned Values, + const std::string &NameStr, + BasicBlock *InsertAtEnd) + : Instruction(PointerType::get(checkType( + getIndexedType(Ptr->getType(), + IdxBegin, IdxEnd)), + cast<PointerType>(Ptr->getType()) + ->getAddressSpace()), + GetElementPtr, + OperandTraits<GetElementPtrInst>::op_end(this) - Values, + Values, InsertAtEnd) { + init(Ptr, IdxBegin, IdxEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} + + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) + + +//===----------------------------------------------------------------------===// +// ICmpInst Class +//===----------------------------------------------------------------------===// + +/// This instruction compares its operands according to the predicate given +/// to the constructor. It only operates on integers or pointers. The operands +/// must be identical types. +/// @brief Represent an integer comparison operator. +class ICmpInst: public CmpInst { +public: + /// @brief Constructor with insert-before-instruction semantics. + ICmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr = "", ///< Name of the instruction + Instruction *InsertBefore = 0 ///< Where to insert + ) : CmpInst(makeCmpResultType(LHS->getType()), + Instruction::ICmp, pred, LHS, RHS, NameStr, + InsertBefore) { + assert(pred >= CmpInst::FIRST_ICMP_PREDICATE && + pred <= CmpInst::LAST_ICMP_PREDICATE && + "Invalid ICmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to ICmp instruction are not of the same type!"); + // Check that the operands are the right type + assert((getOperand(0)->getType()->isIntOrIntVector() || + isa<PointerType>(getOperand(0)->getType())) && + "Invalid operand types for ICmp instruction"); + } + + /// @brief Constructor with insert-at-block-end semantics. + ICmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr, ///< Name of the instruction + BasicBlock *InsertAtEnd ///< Block to insert into. 
+ ) : CmpInst(makeCmpResultType(LHS->getType()), + Instruction::ICmp, pred, LHS, RHS, NameStr, + InsertAtEnd) { + assert(pred >= CmpInst::FIRST_ICMP_PREDICATE && + pred <= CmpInst::LAST_ICMP_PREDICATE && + "Invalid ICmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to ICmp instruction are not of the same type!"); + // Check that the operands are the right type + assert((getOperand(0)->getType()->isIntOrIntVector() || + isa<PointerType>(getOperand(0)->getType())) && + "Invalid operand types for ICmp instruction"); + } + + /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. + /// @returns the predicate that would be the result if the operand were + /// regarded as signed. + /// @brief Return the signed version of the predicate + Predicate getSignedPredicate() const { + return getSignedPredicate(getPredicate()); + } + + /// This is a static version that you can use without an instruction. + /// @brief Return the signed version of the predicate. + static Predicate getSignedPredicate(Predicate pred); + + /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. + /// @returns the predicate that would be the result if the operand were + /// regarded as unsigned. + /// @brief Return the unsigned version of the predicate + Predicate getUnsignedPredicate() const { + return getUnsignedPredicate(getPredicate()); + } + + /// This is a static version that you can use without an instruction. + /// @brief Return the unsigned version of the predicate. + static Predicate getUnsignedPredicate(Predicate pred); + + /// isEquality - Return true if this predicate is either EQ or NE. This also + /// tests for commutativity. + static bool isEquality(Predicate P) { + return P == ICMP_EQ || P == ICMP_NE; + } + + /// isEquality - Return true if this predicate is either EQ or NE. This also + /// tests for commutativity. + bool isEquality() const { + return isEquality(getPredicate()); + } + + /// @returns true if the predicate of this ICmpInst is commutative + /// @brief Determine if this relation is commutative. + bool isCommutative() const { return isEquality(); } + + /// isRelational - Return true if the predicate is relational (not EQ or NE). + /// + bool isRelational() const { + return !isEquality(); + } + + /// isRelational - Return true if the predicate is relational (not EQ or NE). + /// + static bool isRelational(Predicate P) { + return !isEquality(P); + } + + /// @returns true if the predicate of this ICmpInst is signed, false otherwise + /// @brief Determine if this instruction's predicate is signed. + bool isSignedPredicate() const { return isSignedPredicate(getPredicate()); } + + /// @returns true if the predicate provided is signed, false otherwise + /// @brief Determine if the predicate is signed. + static bool isSignedPredicate(Predicate pred); + + /// @returns true if the specified compare predicate is + /// true when both operands are equal... + /// @brief Determine if the icmp is true when both operands are equal + static bool isTrueWhenEqual(ICmpInst::Predicate pred) { + return pred == ICmpInst::ICMP_EQ || pred == ICmpInst::ICMP_UGE || + pred == ICmpInst::ICMP_SGE || pred == ICmpInst::ICMP_ULE || + pred == ICmpInst::ICMP_SLE; + } + + /// @returns true if the specified compare instruction is + /// true when both operands are equal... 
+ /// @brief Determine if the ICmpInst returns true when both operands are equal + bool isTrueWhenEqual() { + return isTrueWhenEqual(getPredicate()); + } + + /// Initialize a set of values that all satisfy the predicate with C. + /// @brief Make a ConstantRange for a relation with a constant value. + static ConstantRange makeConstantRange(Predicate pred, const APInt &C); + + /// Exchange the two operands to this instruction in such a way that it does + /// not modify the semantics of the instruction. The predicate value may be + /// changed to retain the same result if the predicate is order dependent + /// (e.g. ult). + /// @brief Swap operands and adjust predicate. + void swapOperands() { + SubclassData = getSwappedPredicate(); + Op<0>().swap(Op<1>()); + } + + virtual ICmpInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ICmpInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::ICmp; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } + +}; + +//===----------------------------------------------------------------------===// +// FCmpInst Class +//===----------------------------------------------------------------------===// + +/// This instruction compares its operands according to the predicate given +/// to the constructor. It only operates on floating point values or packed +/// vectors of floating point values. The operands must be identical types. +/// @brief Represents a floating point comparison operator. +class FCmpInst: public CmpInst { +public: + /// @brief Constructor with insert-before-instruction semantics. + FCmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr = "", ///< Name of the instruction + Instruction *InsertBefore = 0 ///< Where to insert + ) : CmpInst(makeCmpResultType(LHS->getType()), + Instruction::FCmp, pred, LHS, RHS, NameStr, + InsertBefore) { + assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && + "Invalid FCmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to FCmp instruction are not of the same type!"); + // Check that the operands are the right type + assert(getOperand(0)->getType()->isFPOrFPVector() && + "Invalid operand types for FCmp instruction"); + } + + /// @brief Constructor with insert-at-block-end semantics. + FCmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr, ///< Name of the instruction + BasicBlock *InsertAtEnd ///< Block to insert into. + ) : CmpInst(makeCmpResultType(LHS->getType()), + Instruction::FCmp, pred, LHS, RHS, NameStr, + InsertAtEnd) { + assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && + "Invalid FCmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to FCmp instruction are not of the same type!"); + // Check that the operands are the right type + assert(getOperand(0)->getType()->isFPOrFPVector() && + "Invalid operand types for FCmp instruction"); + } + + /// @returns true if the predicate of this instruction is EQ or NE. + /// @brief Determine if this is an equality predicate. 
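// ---[ usage sketch: integer and FP comparisons (illustrative only) ]--------
// A hypothetical helper constructing the two scalar comparison classes above.
// `A`/`B` are assumed to be integer values of the same type, `X`/`Y` floating
// point values of the same type, and `InsertPt` the insertion point.
void comparisonExample(llvm::Value *A, llvm::Value *B,
                       llvm::Value *X, llvm::Value *Y,
                       llvm::Instruction *InsertPt) {
  using namespace llvm;
  ICmpInst *IntLT =
      new ICmpInst(ICmpInst::ICMP_ULT, A, B, "int.lt", InsertPt);
  // Ordered FP predicates are false whenever either operand is a NaN.
  new FCmpInst(FCmpInst::FCMP_OLT, X, Y, "fp.lt", InsertPt);
  // Swapping operands turns ULT into UGT, so the result is unchanged.
  IntLT->swapOperands();
}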
+ bool isEquality() const { + return SubclassData == FCMP_OEQ || SubclassData == FCMP_ONE || + SubclassData == FCMP_UEQ || SubclassData == FCMP_UNE; + } + + /// @returns true if the predicate of this instruction is commutative. + /// @brief Determine if this is a commutative predicate. + bool isCommutative() const { + return isEquality() || + SubclassData == FCMP_FALSE || + SubclassData == FCMP_TRUE || + SubclassData == FCMP_ORD || + SubclassData == FCMP_UNO; + } + + /// @returns true if the predicate is relational (not EQ or NE). + /// @brief Determine if this a relational predicate. + bool isRelational() const { return !isEquality(); } + + /// Exchange the two operands to this instruction in such a way that it does + /// not modify the semantics of the instruction. The predicate value may be + /// changed to retain the same result if the predicate is order dependent + /// (e.g. ult). + /// @brief Swap operands and adjust predicate. + void swapOperands() { + SubclassData = getSwappedPredicate(); + Op<0>().swap(Op<1>()); + } + + virtual FCmpInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FCmpInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::FCmp; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } + +}; + +//===----------------------------------------------------------------------===// +// VICmpInst Class +//===----------------------------------------------------------------------===// + +/// This instruction compares its operands according to the predicate given +/// to the constructor. It only operates on vectors of integers. +/// The operands must be identical types. +/// @brief Represents a vector integer comparison operator. +class VICmpInst: public CmpInst { +public: + /// @brief Constructor with insert-before-instruction semantics. + VICmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr = "", ///< Name of the instruction + Instruction *InsertBefore = 0 ///< Where to insert + ) : CmpInst(LHS->getType(), Instruction::VICmp, pred, LHS, RHS, NameStr, + InsertBefore) { + assert(pred >= CmpInst::FIRST_ICMP_PREDICATE && + pred <= CmpInst::LAST_ICMP_PREDICATE && + "Invalid VICmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to VICmp instruction are not of the same type!"); + } + + /// @brief Constructor with insert-at-block-end semantics. + VICmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr, ///< Name of the instruction + BasicBlock *InsertAtEnd ///< Block to insert into. + ) : CmpInst(LHS->getType(), Instruction::VICmp, pred, LHS, RHS, NameStr, + InsertAtEnd) { + assert(pred >= CmpInst::FIRST_ICMP_PREDICATE && + pred <= CmpInst::LAST_ICMP_PREDICATE && + "Invalid VICmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to VICmp instruction are not of the same type!"); + } + + /// @brief Return the predicate for this instruction. 
+ Predicate getPredicate() const { return Predicate(SubclassData); } + + virtual VICmpInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const VICmpInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::VICmp; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// VFCmpInst Class +//===----------------------------------------------------------------------===// + +/// This instruction compares its operands according to the predicate given +/// to the constructor. It only operates on vectors of floating point values. +/// The operands must be identical types. +/// @brief Represents a vector floating point comparison operator. +class VFCmpInst: public CmpInst { +public: + /// @brief Constructor with insert-before-instruction semantics. + VFCmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr = "", ///< Name of the instruction + Instruction *InsertBefore = 0 ///< Where to insert + ) : CmpInst(VectorType::getInteger(cast<VectorType>(LHS->getType())), + Instruction::VFCmp, pred, LHS, RHS, NameStr, InsertBefore) { + assert(pred <= CmpInst::LAST_FCMP_PREDICATE && + "Invalid VFCmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to VFCmp instruction are not of the same type!"); + } + + /// @brief Constructor with insert-at-block-end semantics. + VFCmpInst( + Predicate pred, ///< The predicate to use for the comparison + Value *LHS, ///< The left-hand-side of the expression + Value *RHS, ///< The right-hand-side of the expression + const std::string &NameStr, ///< Name of the instruction + BasicBlock *InsertAtEnd ///< Block to insert into. + ) : CmpInst(VectorType::getInteger(cast<VectorType>(LHS->getType())), + Instruction::VFCmp, pred, LHS, RHS, NameStr, InsertAtEnd) { + assert(pred <= CmpInst::LAST_FCMP_PREDICATE && + "Invalid VFCmp predicate value"); + assert(getOperand(0)->getType() == getOperand(1)->getType() && + "Both operands to VFCmp instruction are not of the same type!"); + } + + /// @brief Return the predicate for this instruction. + Predicate getPredicate() const { return Predicate(SubclassData); } + + virtual VFCmpInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const VFCmpInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::VFCmp; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// CallInst Class +//===----------------------------------------------------------------------===// +/// CallInst - This class represents a function call, abstracting a target +/// machine's calling convention. This class uses low bit of the SubClassData +/// field to indicate whether or not this is a tail call. The rest of the bits +/// hold the calling convention of the call. 
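+/// As an illustrative sketch only (Callee is assumed to be an existing
+/// Function* and Arg0/Arg1 Values of the matching parameter types), a direct
+/// call is usually built through one of the Create overloads declared below:
+///   Value *Args[] = { Arg0, Arg1 };
+///   CallInst *CI = CallInst::Create(Callee, Args, Args + 2, "tmp", InsertPt);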
+/// + +class CallInst : public Instruction { + AttrListPtr AttributeList; ///< parameter attributes for call + CallInst(const CallInst &CI); + void init(Value *Func, Value* const *Params, unsigned NumParams); + void init(Value *Func, Value *Actual1, Value *Actual2); + void init(Value *Func, Value *Actual); + void init(Value *Func); + + template<typename InputIterator> + void init(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, + // This argument ensures that we have an iterator we can + // do arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumArgs = (unsigned)std::distance(ArgBegin, ArgEnd); + + // This requires that the iterator points to contiguous memory. + init(Func, NumArgs ? &*ArgBegin : 0, NumArgs); + setName(NameStr); + } + + /// Construct a CallInst given a range of arguments. InputIterator + /// must be a random-access iterator pointing to contiguous storage + /// (e.g. a std::vector<>::iterator). Checks are made for + /// random-accessness but not for contiguous storage as that would + /// incur runtime overhead. + /// @brief Construct a CallInst from a range of arguments + template<typename InputIterator> + CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, Instruction *InsertBefore); + + /// Construct a CallInst given a range of arguments. InputIterator + /// must be a random-access iterator pointing to contiguous storage + /// (e.g. a std::vector<>::iterator). Checks are made for + /// random-accessness but not for contiguous storage as that would + /// incur runtime overhead. + /// @brief Construct a CallInst from a range of arguments + template<typename InputIterator> + inline CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, BasicBlock *InsertAtEnd); + + CallInst(Value *F, Value *Actual, const std::string& NameStr, + Instruction *InsertBefore); + CallInst(Value *F, Value *Actual, const std::string& NameStr, + BasicBlock *InsertAtEnd); + explicit CallInst(Value *F, const std::string &NameStr, + Instruction *InsertBefore); + CallInst(Value *F, const std::string &NameStr, BasicBlock *InsertAtEnd); +public: + template<typename InputIterator> + static CallInst *Create(Value *Func, + InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new((unsigned)(ArgEnd - ArgBegin + 1)) + CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertBefore); + } + template<typename InputIterator> + static CallInst *Create(Value *Func, + InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, BasicBlock *InsertAtEnd) { + return new((unsigned)(ArgEnd - ArgBegin + 1)) + CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertAtEnd); + } + static CallInst *Create(Value *F, Value *Actual, + const std::string& NameStr = "", + Instruction *InsertBefore = 0) { + return new(2) CallInst(F, Actual, NameStr, InsertBefore); + } + static CallInst *Create(Value *F, Value *Actual, const std::string& NameStr, + BasicBlock *InsertAtEnd) { + return new(2) CallInst(F, Actual, NameStr, InsertAtEnd); + } + static CallInst *Create(Value *F, const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new(1) CallInst(F, NameStr, InsertBefore); + } + static CallInst *Create(Value *F, const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new(1) CallInst(F, NameStr, InsertAtEnd); + } + + ~CallInst(); + + bool isTailCall() const { return SubclassData & 1; } 
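+  /// Bit 0 of SubclassData is the tail-call flag and the remaining bits hold
+  /// the calling convention, so marking a fastcc tail call looks like this
+  /// (illustrative sketch; CI is assumed to be an existing CallInst*):
+  ///   CI->setCallingConv(CallingConv::Fast);
+  ///   CI->setTailCall(true);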
+ void setTailCall(bool isTC = true) { + SubclassData = (SubclassData & ~1) | unsigned(isTC); + } + + virtual CallInst *clone() const; + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// getCallingConv/setCallingConv - Get or set the calling convention of this + /// function call. + unsigned getCallingConv() const { return SubclassData >> 1; } + void setCallingConv(unsigned CC) { + SubclassData = (SubclassData & 1) | (CC << 1); + } + + /// getAttributes - Return the parameter attributes for this call. + /// + const AttrListPtr &getAttributes() const { return AttributeList; } + + /// setAttributes - Set the parameter attributes for this call. + /// + void setAttributes(const AttrListPtr &Attrs) { AttributeList = Attrs; } + + /// addAttribute - adds the attribute to the list of attributes. + void addAttribute(unsigned i, Attributes attr); + + /// removeAttribute - removes the attribute from the list of attributes. + void removeAttribute(unsigned i, Attributes attr); + + /// @brief Determine whether the call or the callee has the given attribute. + bool paramHasAttr(unsigned i, Attributes attr) const; + + /// @brief Extract the alignment for a call or parameter (0=unknown). + unsigned getParamAlignment(unsigned i) const { + return AttributeList.getParamAlignment(i); + } + + /// @brief Determine if the call does not access memory. + bool doesNotAccessMemory() const { + return paramHasAttr(~0, Attribute::ReadNone); + } + void setDoesNotAccessMemory(bool NotAccessMemory = true) { + if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone); + else removeAttribute(~0, Attribute::ReadNone); + } + + /// @brief Determine if the call does not access or only reads memory. + bool onlyReadsMemory() const { + return doesNotAccessMemory() || paramHasAttr(~0, Attribute::ReadOnly); + } + void setOnlyReadsMemory(bool OnlyReadsMemory = true) { + if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly); + else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone); + } + + /// @brief Determine if the call cannot return. + bool doesNotReturn() const { + return paramHasAttr(~0, Attribute::NoReturn); + } + void setDoesNotReturn(bool DoesNotReturn = true) { + if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn); + else removeAttribute(~0, Attribute::NoReturn); + } + + /// @brief Determine if the call cannot unwind. + bool doesNotThrow() const { + return paramHasAttr(~0, Attribute::NoUnwind); + } + void setDoesNotThrow(bool DoesNotThrow = true) { + if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind); + else removeAttribute(~0, Attribute::NoUnwind); + } + + /// @brief Determine if the call returns a structure through first + /// pointer argument. + bool hasStructRetAttr() const { + // Be friendly and also check the callee. + return paramHasAttr(1, Attribute::StructRet); + } + + /// @brief Determine if any call argument is an aggregate passed by value. + bool hasByValArgument() const { + return AttributeList.hasAttrSomewhere(Attribute::ByVal); + } + + /// getCalledFunction - Return the function called, or null if this is an + /// indirect function invocation. 
+ /// + Function *getCalledFunction() const { + return dyn_cast<Function>(Op<0>()); + } + + /// getCalledValue - Get a pointer to the function that is invoked by this + /// instruction + const Value *getCalledValue() const { return Op<0>(); } + Value *getCalledValue() { return Op<0>(); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const CallInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Call; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<CallInst> : VariadicOperandTraits<1> { +}; + +template<typename InputIterator> +CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, BasicBlock *InsertAtEnd) + : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType()) + ->getElementType())->getReturnType(), + Instruction::Call, + OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1), + (unsigned)(ArgEnd - ArgBegin + 1), InsertAtEnd) { + init(Func, ArgBegin, ArgEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} + +template<typename InputIterator> +CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, Instruction *InsertBefore) + : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType()) + ->getElementType())->getReturnType(), + Instruction::Call, + OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1), + (unsigned)(ArgEnd - ArgBegin + 1), InsertBefore) { + init(Func, ArgBegin, ArgEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value) + +//===----------------------------------------------------------------------===// +// SelectInst Class +//===----------------------------------------------------------------------===// + +/// SelectInst - This class represents the LLVM 'select' instruction. 
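+/// As an illustrative sketch (Cond is assumed to be an existing i1 Value and
+/// TVal/FVal Values of the same type), a select is normally built with:
+///   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);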
+/// +class SelectInst : public Instruction { + void init(Value *C, Value *S1, Value *S2) { + assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); + Op<0>() = C; + Op<1>() = S1; + Op<2>() = S2; + } + + SelectInst(const SelectInst &SI) + : Instruction(SI.getType(), SI.getOpcode(), &Op<0>(), 3) { + init(SI.Op<0>(), SI.Op<1>(), SI.Op<2>()); + } + SelectInst(Value *C, Value *S1, Value *S2, const std::string &NameStr, + Instruction *InsertBefore) + : Instruction(S1->getType(), Instruction::Select, + &Op<0>(), 3, InsertBefore) { + init(C, S1, S2); + setName(NameStr); + } + SelectInst(Value *C, Value *S1, Value *S2, const std::string &NameStr, + BasicBlock *InsertAtEnd) + : Instruction(S1->getType(), Instruction::Select, + &Op<0>(), 3, InsertAtEnd) { + init(C, S1, S2); + setName(NameStr); + } +public: + static SelectInst *Create(Value *C, Value *S1, Value *S2, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); + } + static SelectInst *Create(Value *C, Value *S1, Value *S2, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); + } + + Value *getCondition() const { return Op<0>(); } + Value *getTrueValue() const { return Op<1>(); } + Value *getFalseValue() const { return Op<2>(); } + + /// areInvalidOperands - Return a string if the specified operands are invalid + /// for a select operation, otherwise return null. + static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + OtherOps getOpcode() const { + return static_cast<OtherOps>(Instruction::getOpcode()); + } + + virtual SelectInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SelectInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Select; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<SelectInst> : FixedNumOperandTraits<3> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) + +//===----------------------------------------------------------------------===// +// VAArgInst Class +//===----------------------------------------------------------------------===// + +/// VAArgInst - This class represents the va_arg llvm instruction, which returns +/// an argument of the specified type given a va_list and increments that list +/// +class VAArgInst : public UnaryInstruction { + VAArgInst(const VAArgInst &VAA) + : UnaryInstruction(VAA.getType(), VAArg, VAA.getOperand(0)) {} +public: + VAArgInst(Value *List, const Type *Ty, const std::string &NameStr = "", + Instruction *InsertBefore = 0) + : UnaryInstruction(Ty, VAArg, List, InsertBefore) { + setName(NameStr); + } + VAArgInst(Value *List, const Type *Ty, const std::string &NameStr, + BasicBlock *InsertAtEnd) + : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { + setName(NameStr); + } + + virtual VAArgInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const VAArgInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == VAArg; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && 
classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// ExtractElementInst Class +//===----------------------------------------------------------------------===// + +/// ExtractElementInst - This instruction extracts a single (scalar) +/// element from a VectorType value +/// +class ExtractElementInst : public Instruction { + ExtractElementInst(const ExtractElementInst &EE) : + Instruction(EE.getType(), ExtractElement, &Op<0>(), 2) { + Op<0>() = EE.Op<0>(); + Op<1>() = EE.Op<1>(); + } + +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); // FIXME: "unsigned Idx" forms of ctor? + } + ExtractElementInst(Value *Vec, Value *Idx, const std::string &NameStr = "", + Instruction *InsertBefore = 0); + ExtractElementInst(Value *Vec, unsigned Idx, const std::string &NameStr = "", + Instruction *InsertBefore = 0); + ExtractElementInst(Value *Vec, Value *Idx, const std::string &NameStr, + BasicBlock *InsertAtEnd); + ExtractElementInst(Value *Vec, unsigned Idx, const std::string &NameStr, + BasicBlock *InsertAtEnd); + + /// isValidOperands - Return true if an extractelement instruction can be + /// formed with the specified operands. + static bool isValidOperands(const Value *Vec, const Value *Idx); + + virtual ExtractElementInst *clone() const; + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ExtractElementInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::ExtractElement; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<ExtractElementInst> : FixedNumOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) + +//===----------------------------------------------------------------------===// +// InsertElementInst Class +//===----------------------------------------------------------------------===// + +/// InsertElementInst - This instruction inserts a single (scalar) +/// element into a VectorType value +/// +class InsertElementInst : public Instruction { + InsertElementInst(const InsertElementInst &IE); + InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0); + InsertElementInst(Value *Vec, Value *NewElt, unsigned Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0); + InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, + const std::string &NameStr, BasicBlock *InsertAtEnd); + InsertElementInst(Value *Vec, Value *NewElt, unsigned Idx, + const std::string &NameStr, BasicBlock *InsertAtEnd); +public: + static InsertElementInst *Create(const InsertElementInst &IE) { + return new(IE.getNumOperands()) InsertElementInst(IE); + } + static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); + } + static InsertElementInst *Create(Value *Vec, Value *NewElt, unsigned Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); + } + static InsertElementInst 
*Create(Value *Vec, Value *NewElt, Value *Idx, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); + } + static InsertElementInst *Create(Value *Vec, Value *NewElt, unsigned Idx, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); + } + + /// isValidOperands - Return true if an insertelement instruction can be + /// formed with the specified operands. + static bool isValidOperands(const Value *Vec, const Value *NewElt, + const Value *Idx); + + virtual InsertElementInst *clone() const; + + /// getType - Overload to return most specific vector type. + /// + const VectorType *getType() const { + return reinterpret_cast<const VectorType*>(Instruction::getType()); + } + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const InsertElementInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::InsertElement; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<InsertElementInst> : FixedNumOperandTraits<3> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) + +//===----------------------------------------------------------------------===// +// ShuffleVectorInst Class +//===----------------------------------------------------------------------===// + +/// ShuffleVectorInst - This instruction constructs a fixed permutation of two +/// input vectors. +/// +class ShuffleVectorInst : public Instruction { + ShuffleVectorInst(const ShuffleVectorInst &IE); +public: + // allocate space for exactly three operands + void *operator new(size_t s) { + return User::operator new(s, 3); + } + ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, + const std::string &NameStr = "", + Instruction *InsertBefor = 0); + ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, + const std::string &NameStr, BasicBlock *InsertAtEnd); + + /// isValidOperands - Return true if a shufflevector instruction can be + /// formed with the specified operands. + static bool isValidOperands(const Value *V1, const Value *V2, + const Value *Mask); + + virtual ShuffleVectorInst *clone() const; + + /// getType - Overload to return most specific vector type. + /// + const VectorType *getType() const { + return reinterpret_cast<const VectorType*>(Instruction::getType()); + } + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// getMaskValue - Return the index from the shuffle mask for the specified + /// output result. This is either -1 if the element is undef or a number less + /// than 2*numelements. 
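+  /// As an illustrative example, for a shuffle of two 4-element vectors with
+  /// the mask <i32 0, i32 5, i32 undef, i32 3>, getMaskValue(1) returns 5
+  /// (element 1 of the second input vector) and getMaskValue(2) returns -1.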
+ int getMaskValue(unsigned i) const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ShuffleVectorInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::ShuffleVector; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<ShuffleVectorInst> : FixedNumOperandTraits<3> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) + +//===----------------------------------------------------------------------===// +// ExtractValueInst Class +//===----------------------------------------------------------------------===// + +/// ExtractValueInst - This instruction extracts a struct member or array +/// element value from an aggregate value. +/// +class ExtractValueInst : public UnaryInstruction { + SmallVector<unsigned, 4> Indices; + + ExtractValueInst(const ExtractValueInst &EVI); + void init(const unsigned *Idx, unsigned NumIdx, + const std::string &NameStr); + void init(unsigned Idx, const std::string &NameStr); + + template<typename InputIterator> + void init(InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, + // This argument ensures that we have an iterator we can + // do arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumIdx = static_cast<unsigned>(std::distance(IdxBegin, IdxEnd)); + + // There's no fundamental reason why we require at least one index + // (other than weirdness with &*IdxBegin being invalid; see + // getelementptr's init routine for example). But there's no + // present need to support it. + assert(NumIdx > 0 && "ExtractValueInst must have at least one index"); + + // This requires that the iterator points to contiguous memory. + init(&*IdxBegin, NumIdx, NameStr); // FIXME: for the general case + // we have to build an array here + } + + /// getIndexedType - Returns the type of the element that would be extracted + /// with an extractvalue instruction with the specified parameters. + /// + /// Null is returned if the indices are invalid for the specified + /// pointer type. + /// + static const Type *getIndexedType(const Type *Agg, + const unsigned *Idx, unsigned NumIdx); + + template<typename InputIterator> + static const Type *getIndexedType(const Type *Ptr, + InputIterator IdxBegin, + InputIterator IdxEnd, + // This argument ensures that we + // have an iterator we can do + // arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumIdx = static_cast<unsigned>(std::distance(IdxBegin, IdxEnd)); + + if (NumIdx > 0) + // This requires that the iterator points to contiguous memory. + return getIndexedType(Ptr, &*IdxBegin, NumIdx); + else + return getIndexedType(Ptr, (const unsigned *)0, NumIdx); + } + + /// Constructors - Create a extractvalue instruction with a base aggregate + /// value and a list of indices. The first ctor can optionally insert before + /// an existing instruction, the second appends the new instruction to the + /// specified BasicBlock. 
+ template<typename InputIterator> + inline ExtractValueInst(Value *Agg, InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr, + Instruction *InsertBefore); + template<typename InputIterator> + inline ExtractValueInst(Value *Agg, + InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, BasicBlock *InsertAtEnd); + + // allocate space for exactly one operand + void *operator new(size_t s) { + return User::operator new(s, 1); + } + +public: + template<typename InputIterator> + static ExtractValueInst *Create(Value *Agg, InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new + ExtractValueInst(Agg, IdxBegin, IdxEnd, NameStr, InsertBefore); + } + template<typename InputIterator> + static ExtractValueInst *Create(Value *Agg, + InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new ExtractValueInst(Agg, IdxBegin, IdxEnd, NameStr, InsertAtEnd); + } + + /// Constructors - These two creators are convenience methods because one + /// index extractvalue instructions are much more common than those with + /// more than one. + static ExtractValueInst *Create(Value *Agg, unsigned Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + unsigned Idxs[1] = { Idx }; + return new ExtractValueInst(Agg, Idxs, Idxs + 1, NameStr, InsertBefore); + } + static ExtractValueInst *Create(Value *Agg, unsigned Idx, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + unsigned Idxs[1] = { Idx }; + return new ExtractValueInst(Agg, Idxs, Idxs + 1, NameStr, InsertAtEnd); + } + + virtual ExtractValueInst *clone() const; + + /// getIndexedType - Returns the type of the element that would be extracted + /// with an extractvalue instruction with the specified parameters. + /// + /// Null is returned if the indices are invalid for the specified + /// pointer type. 
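+  /// As an illustrative example, for an aggregate of type {i32, [4 x float]}
+  /// the index list {1, 2} designates a float element, so that is the type
+  /// this function returns for it.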
+ /// + template<typename InputIterator> + static const Type *getIndexedType(const Type *Ptr, + InputIterator IdxBegin, + InputIterator IdxEnd) { + return getIndexedType(Ptr, IdxBegin, IdxEnd, + typename std::iterator_traits<InputIterator>:: + iterator_category()); + } + static const Type *getIndexedType(const Type *Ptr, unsigned Idx); + + typedef const unsigned* idx_iterator; + inline idx_iterator idx_begin() const { return Indices.begin(); } + inline idx_iterator idx_end() const { return Indices.end(); } + + Value *getAggregateOperand() { + return getOperand(0); + } + const Value *getAggregateOperand() const { + return getOperand(0); + } + static unsigned getAggregateOperandIndex() { + return 0U; // get index for modifying correct operand + } + + unsigned getNumIndices() const { // Note: always non-negative + return (unsigned)Indices.size(); + } + + bool hasIndices() const { + return true; + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ExtractValueInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::ExtractValue; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template<typename InputIterator> +ExtractValueInst::ExtractValueInst(Value *Agg, + InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr, + Instruction *InsertBefore) + : UnaryInstruction(checkType(getIndexedType(Agg->getType(), + IdxBegin, IdxEnd)), + ExtractValue, Agg, InsertBefore) { + init(IdxBegin, IdxEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} +template<typename InputIterator> +ExtractValueInst::ExtractValueInst(Value *Agg, + InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr, + BasicBlock *InsertAtEnd) + : UnaryInstruction(checkType(getIndexedType(Agg->getType(), + IdxBegin, IdxEnd)), + ExtractValue, Agg, InsertAtEnd) { + init(IdxBegin, IdxEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} + + +//===----------------------------------------------------------------------===// +// InsertValueInst Class +//===----------------------------------------------------------------------===// + +/// InsertValueInst - This instruction inserts a struct field of array element +/// value into an aggregate value. +/// +class InsertValueInst : public Instruction { + SmallVector<unsigned, 4> Indices; + + void *operator new(size_t, unsigned); // Do not implement + InsertValueInst(const InsertValueInst &IVI); + void init(Value *Agg, Value *Val, const unsigned *Idx, unsigned NumIdx, + const std::string &NameStr); + void init(Value *Agg, Value *Val, unsigned Idx, const std::string &NameStr); + + template<typename InputIterator> + void init(Value *Agg, Value *Val, + InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, + // This argument ensures that we have an iterator we can + // do arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumIdx = static_cast<unsigned>(std::distance(IdxBegin, IdxEnd)); + + // There's no fundamental reason why we require at least one index + // (other than weirdness with &*IdxBegin being invalid; see + // getelementptr's init routine for example). But there's no + // present need to support it. + assert(NumIdx > 0 && "InsertValueInst must have at least one index"); + + // This requires that the iterator points to contiguous memory. 
+ init(Agg, Val, &*IdxBegin, NumIdx, NameStr); // FIXME: for the general case + // we have to build an array here + } + + /// Constructors - Create a insertvalue instruction with a base aggregate + /// value, a value to insert, and a list of indices. The first ctor can + /// optionally insert before an existing instruction, the second appends + /// the new instruction to the specified BasicBlock. + template<typename InputIterator> + inline InsertValueInst(Value *Agg, Value *Val, InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr, + Instruction *InsertBefore); + template<typename InputIterator> + inline InsertValueInst(Value *Agg, Value *Val, + InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, BasicBlock *InsertAtEnd); + + /// Constructors - These two constructors are convenience methods because one + /// and two index insertvalue instructions are so common. + InsertValueInst(Value *Agg, Value *Val, + unsigned Idx, const std::string &NameStr = "", + Instruction *InsertBefore = 0); + InsertValueInst(Value *Agg, Value *Val, unsigned Idx, + const std::string &NameStr, BasicBlock *InsertAtEnd); +public: + // allocate space for exactly two operands + void *operator new(size_t s) { + return User::operator new(s, 2); + } + + template<typename InputIterator> + static InsertValueInst *Create(Value *Agg, Value *Val, InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new InsertValueInst(Agg, Val, IdxBegin, IdxEnd, + NameStr, InsertBefore); + } + template<typename InputIterator> + static InsertValueInst *Create(Value *Agg, Value *Val, + InputIterator IdxBegin, InputIterator IdxEnd, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new InsertValueInst(Agg, Val, IdxBegin, IdxEnd, + NameStr, InsertAtEnd); + } + + /// Constructors - These two creators are convenience methods because one + /// index insertvalue instructions are much more common than those with + /// more than one. + static InsertValueInst *Create(Value *Agg, Value *Val, unsigned Idx, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new InsertValueInst(Agg, Val, Idx, NameStr, InsertBefore); + } + static InsertValueInst *Create(Value *Agg, Value *Val, unsigned Idx, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new InsertValueInst(Agg, Val, Idx, NameStr, InsertAtEnd); + } + + virtual InsertValueInst *clone() const; + + /// Transparently provide more efficient getOperand methods. 
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + typedef const unsigned* idx_iterator; + inline idx_iterator idx_begin() const { return Indices.begin(); } + inline idx_iterator idx_end() const { return Indices.end(); } + + Value *getAggregateOperand() { + return getOperand(0); + } + const Value *getAggregateOperand() const { + return getOperand(0); + } + static unsigned getAggregateOperandIndex() { + return 0U; // get index for modifying correct operand + } + + Value *getInsertedValueOperand() { + return getOperand(1); + } + const Value *getInsertedValueOperand() const { + return getOperand(1); + } + static unsigned getInsertedValueOperandIndex() { + return 1U; // get index for modifying correct operand + } + + unsigned getNumIndices() const { // Note: always non-negative + return (unsigned)Indices.size(); + } + + bool hasIndices() const { + return true; + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const InsertValueInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::InsertValue; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +template <> +struct OperandTraits<InsertValueInst> : FixedNumOperandTraits<2> { +}; + +template<typename InputIterator> +InsertValueInst::InsertValueInst(Value *Agg, + Value *Val, + InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr, + Instruction *InsertBefore) + : Instruction(Agg->getType(), InsertValue, + OperandTraits<InsertValueInst>::op_begin(this), + 2, InsertBefore) { + init(Agg, Val, IdxBegin, IdxEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} +template<typename InputIterator> +InsertValueInst::InsertValueInst(Value *Agg, + Value *Val, + InputIterator IdxBegin, + InputIterator IdxEnd, + const std::string &NameStr, + BasicBlock *InsertAtEnd) + : Instruction(Agg->getType(), InsertValue, + OperandTraits<InsertValueInst>::op_begin(this), + 2, InsertAtEnd) { + init(Agg, Val, IdxBegin, IdxEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) + +//===----------------------------------------------------------------------===// +// PHINode Class +//===----------------------------------------------------------------------===// + +// PHINode - The PHINode class is used to represent the magical mystical PHI +// node, that can not exist in nature, but can be synthesized in a computer +// scientist's overactive imagination. +// +class PHINode : public Instruction { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + /// ReservedSpace - The number of operands actually allocated. NumOperands is + /// the number actually in use. 
+ unsigned ReservedSpace; + PHINode(const PHINode &PN); + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } + explicit PHINode(const Type *Ty, const std::string &NameStr = "", + Instruction *InsertBefore = 0) + : Instruction(Ty, Instruction::PHI, 0, 0, InsertBefore), + ReservedSpace(0) { + setName(NameStr); + } + + PHINode(const Type *Ty, const std::string &NameStr, BasicBlock *InsertAtEnd) + : Instruction(Ty, Instruction::PHI, 0, 0, InsertAtEnd), + ReservedSpace(0) { + setName(NameStr); + } +public: + static PHINode *Create(const Type *Ty, const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + return new PHINode(Ty, NameStr, InsertBefore); + } + static PHINode *Create(const Type *Ty, const std::string &NameStr, + BasicBlock *InsertAtEnd) { + return new PHINode(Ty, NameStr, InsertAtEnd); + } + ~PHINode(); + + /// reserveOperandSpace - This method can be used to avoid repeated + /// reallocation of PHI operand lists by reserving space for the correct + /// number of operands before adding them. Unlike normal vector reserves, + /// this method can also be used to trim the operand space. + void reserveOperandSpace(unsigned NumValues) { + resizeOperands(NumValues*2); + } + + virtual PHINode *clone() const; + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// getNumIncomingValues - Return the number of incoming edges + /// + unsigned getNumIncomingValues() const { return getNumOperands()/2; } + + /// getIncomingValue - Return incoming value number x + /// + Value *getIncomingValue(unsigned i) const { + assert(i*2 < getNumOperands() && "Invalid value number!"); + return getOperand(i*2); + } + void setIncomingValue(unsigned i, Value *V) { + assert(i*2 < getNumOperands() && "Invalid value number!"); + setOperand(i*2, V); + } + static unsigned getOperandNumForIncomingValue(unsigned i) { + return i*2; + } + static unsigned getIncomingValueNumForOperand(unsigned i) { + assert(i % 2 == 0 && "Invalid incoming-value operand index!"); + return i/2; + } + + /// getIncomingBlock - Return incoming basic block corresponding + /// to value use iterator + /// + template <typename U> + BasicBlock *getIncomingBlock(value_use_iterator<U> I) const { + assert(this == *I && "Iterator doesn't point to PHI's Uses?"); + return static_cast<BasicBlock*>((&I.getUse() + 1)->get()); + } + /// getIncomingBlock - Return incoming basic block number x + /// + BasicBlock *getIncomingBlock(unsigned i) const { + return static_cast<BasicBlock*>(getOperand(i*2+1)); + } + void setIncomingBlock(unsigned i, BasicBlock *BB) { + setOperand(i*2+1, BB); + } + static unsigned getOperandNumForIncomingBlock(unsigned i) { + return i*2+1; + } + static unsigned getIncomingBlockNumForOperand(unsigned i) { + assert(i % 2 == 1 && "Invalid incoming-block operand index!"); + return i/2; + } + + /// addIncoming - Add an incoming value to the end of the PHI list + /// + void addIncoming(Value *V, BasicBlock *BB) { + assert(V && "PHI node got a null value!"); + assert(BB && "PHI node got a null basic block!"); + assert(getType() == V->getType() && + "All operands to PHI node must be the same type as the PHI node!"); + unsigned OpNo = NumOperands; + if (OpNo+2 > ReservedSpace) + resizeOperands(0); // Get more space! + // Initialize some new operands. + NumOperands = OpNo+2; + OperandList[OpNo] = V; + OperandList[OpNo+1] = BB; + } + + /// removeIncomingValue - Remove an incoming value. 
This is useful if a + /// predecessor basic block is deleted. The value removed is returned. + /// + /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty + /// is true), the PHI node is destroyed and any uses of it are replaced with + /// dummy values. The only time there should be zero incoming values to a PHI + /// node is when the block is dead, so this strategy is sound. + /// + Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); + + Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { + int Idx = getBasicBlockIndex(BB); + assert(Idx >= 0 && "Invalid basic block argument to remove!"); + return removeIncomingValue(Idx, DeletePHIIfEmpty); + } + + /// getBasicBlockIndex - Return the first index of the specified basic + /// block in the value list for this PHI. Returns -1 if no instance. + /// + int getBasicBlockIndex(const BasicBlock *BB) const { + Use *OL = OperandList; + for (unsigned i = 0, e = getNumOperands(); i != e; i += 2) + if (OL[i+1].get() == BB) return i/2; + return -1; + } + + Value *getIncomingValueForBlock(const BasicBlock *BB) const { + return getIncomingValue(getBasicBlockIndex(BB)); + } + + /// hasConstantValue - If the specified PHI node always merges together the + /// same value, return the value, otherwise return null. + /// + Value *hasConstantValue(bool AllowNonDominatingInstruction = false) const; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const PHINode *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::PHI; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } + private: + void resizeOperands(unsigned NumOperands); +}; + +template <> +struct OperandTraits<PHINode> : HungoffOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) + + +//===----------------------------------------------------------------------===// +// ReturnInst Class +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------------- +/// ReturnInst - Return a value (possibly void), from a function. Execution +/// does not continue in this function any longer. +/// +class ReturnInst : public TerminatorInst { + ReturnInst(const ReturnInst &RI); + +private: + // ReturnInst constructors: + // ReturnInst() - 'ret void' instruction + // ReturnInst( null) - 'ret void' instruction + // ReturnInst(Value* X) - 'ret X' instruction + // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I + // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I + // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B + // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B + // + // NOTE: If the Value* passed is of type void then the constructor behaves as + // if it was passed NULL. 
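+  // As an illustrative note, clients normally use the public Create methods:
+  // ReturnInst::Create(V, BB) appends 'ret V' to block BB, while
+  // ReturnInst::Create(BB) appends 'ret void'.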
+ explicit ReturnInst(Value *retVal = 0, Instruction *InsertBefore = 0); + ReturnInst(Value *retVal, BasicBlock *InsertAtEnd); + explicit ReturnInst(BasicBlock *InsertAtEnd); +public: + static ReturnInst* Create(Value *retVal = 0, Instruction *InsertBefore = 0) { + return new(!!retVal) ReturnInst(retVal, InsertBefore); + } + static ReturnInst* Create(Value *retVal, BasicBlock *InsertAtEnd) { + return new(!!retVal) ReturnInst(retVal, InsertAtEnd); + } + static ReturnInst* Create(BasicBlock *InsertAtEnd) { + return new(0) ReturnInst(InsertAtEnd); + } + virtual ~ReturnInst(); + + virtual ReturnInst *clone() const; + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// Convenience accessor + Value *getReturnValue(unsigned n = 0) const { + return n < getNumOperands() + ? getOperand(n) + : 0; + } + + unsigned getNumSuccessors() const { return 0; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ReturnInst *) { return true; } + static inline bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::Ret); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } + private: + virtual BasicBlock *getSuccessorV(unsigned idx) const; + virtual unsigned getNumSuccessorsV() const; + virtual void setSuccessorV(unsigned idx, BasicBlock *B); +}; + +template <> +struct OperandTraits<ReturnInst> : OptionalOperandTraits<> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) + +//===----------------------------------------------------------------------===// +// BranchInst Class +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------------- +/// BranchInst - Conditional or Unconditional Branch instruction. +/// +class BranchInst : public TerminatorInst { + /// Ops list - Branches are strange. The operands are ordered: + /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because + /// they don't have to check for cond/uncond branchness. These are mostly + /// accessed relative from op_end(). 
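+  /// As an illustrative consequence of that layout, for a conditional branch
+  /// Op<-1>() is the 'true' destination, Op<-2>() the 'false' destination and
+  /// Op<-3>() the condition, which is what getCondition()/getSuccessor()
+  /// below rely on.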
+ BranchInst(const BranchInst &BI); + void AssertOK(); + // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): + // BranchInst(BB *B) - 'br B' + // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' + // BranchInst(BB* B, Inst *I) - 'br B' insert before I + // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I + // BranchInst(BB* B, BB *I) - 'br B' insert at end + // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end + explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = 0); + BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, + Instruction *InsertBefore = 0); + BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); + BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, + BasicBlock *InsertAtEnd); +public: + static BranchInst *Create(BasicBlock *IfTrue, Instruction *InsertBefore = 0) { + return new(1, true) BranchInst(IfTrue, InsertBefore); + } + static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, + Value *Cond, Instruction *InsertBefore = 0) { + return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); + } + static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { + return new(1, true) BranchInst(IfTrue, InsertAtEnd); + } + static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, + Value *Cond, BasicBlock *InsertAtEnd) { + return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); + } + + ~BranchInst(); + + /// Transparently provide more efficient getOperand methods. + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + virtual BranchInst *clone() const; + + bool isUnconditional() const { return getNumOperands() == 1; } + bool isConditional() const { return getNumOperands() == 3; } + + Value *getCondition() const { + assert(isConditional() && "Cannot get condition of an uncond branch!"); + return Op<-3>(); + } + + void setCondition(Value *V) { + assert(isConditional() && "Cannot set condition of unconditional branch!"); + Op<-3>() = V; + } + + // setUnconditionalDest - Change the current branch to an unconditional branch + // targeting the specified block. + // FIXME: Eliminate this ugly method. + void setUnconditionalDest(BasicBlock *Dest) { + Op<-1>() = Dest; + if (isConditional()) { // Convert this to an uncond branch. 
+ Op<-2>() = 0; + Op<-3>() = 0; + NumOperands = 1; + OperandList = op_begin(); + } + } + + unsigned getNumSuccessors() const { return 1+isConditional(); } + + BasicBlock *getSuccessor(unsigned i) const { + assert(i < getNumSuccessors() && "Successor # out of range for Branch!"); + return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); + } + + void setSuccessor(unsigned idx, BasicBlock *NewSucc) { + assert(idx < getNumSuccessors() && "Successor # out of range for Branch!"); + *(&Op<-1>() - idx) = NewSucc; + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const BranchInst *) { return true; } + static inline bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::Br); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +private: + virtual BasicBlock *getSuccessorV(unsigned idx) const; + virtual unsigned getNumSuccessorsV() const; + virtual void setSuccessorV(unsigned idx, BasicBlock *B); +}; + +template <> +struct OperandTraits<BranchInst> : VariadicOperandTraits<1> {}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) + +//===----------------------------------------------------------------------===// +// SwitchInst Class +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------------- +/// SwitchInst - Multiway switch +/// +class SwitchInst : public TerminatorInst { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + unsigned ReservedSpace; + // Operand[0] = Value to switch on + // Operand[1] = Default basic block destination + // Operand[2n ] = Value to match + // Operand[2n+1] = BasicBlock to go to on match + SwitchInst(const SwitchInst &RI); + void init(Value *Value, BasicBlock *Default, unsigned NumCases); + void resizeOperands(unsigned No); + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } + /// SwitchInst ctor - Create a new switch instruction, specifying a value to + /// switch on and a default destination. The number of additional cases can + /// be specified here to make memory allocation more efficient. This + /// constructor can also autoinsert before another instruction. + SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, + Instruction *InsertBefore = 0); + + /// SwitchInst ctor - Create a new switch instruction, specifying a value to + /// switch on and a default destination. The number of additional cases can + /// be specified here to make memory allocation more efficient. This + /// constructor also autoinserts at the end of the specified BasicBlock. 
+ SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, + BasicBlock *InsertAtEnd); +public: + static SwitchInst *Create(Value *Value, BasicBlock *Default, + unsigned NumCases, Instruction *InsertBefore = 0) { + return new SwitchInst(Value, Default, NumCases, InsertBefore); + } + static SwitchInst *Create(Value *Value, BasicBlock *Default, + unsigned NumCases, BasicBlock *InsertAtEnd) { + return new SwitchInst(Value, Default, NumCases, InsertAtEnd); + } + ~SwitchInst(); + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + // Accessor Methods for Switch stmt + Value *getCondition() const { return getOperand(0); } + void setCondition(Value *V) { setOperand(0, V); } + + BasicBlock *getDefaultDest() const { + return cast<BasicBlock>(getOperand(1)); + } + + /// getNumCases - return the number of 'cases' in this switch instruction. + /// Note that case #0 is always the default case. + unsigned getNumCases() const { + return getNumOperands()/2; + } + + /// getCaseValue - Return the specified case value. Note that case #0, the + /// default destination, does not have a case value. + ConstantInt *getCaseValue(unsigned i) { + assert(i && i < getNumCases() && "Illegal case value to get!"); + return getSuccessorValue(i); + } + + /// getCaseValue - Return the specified case value. Note that case #0, the + /// default destination, does not have a case value. + const ConstantInt *getCaseValue(unsigned i) const { + assert(i && i < getNumCases() && "Illegal case value to get!"); + return getSuccessorValue(i); + } + + /// findCaseValue - Search all of the case values for the specified constant. + /// If it is explicitly handled, return the case number of it, otherwise + /// return 0 to indicate that it is handled by the default handler. + unsigned findCaseValue(const ConstantInt *C) const { + for (unsigned i = 1, e = getNumCases(); i != e; ++i) + if (getCaseValue(i) == C) + return i; + return 0; + } + + /// findCaseDest - Finds the unique case value for a given successor. Returns + /// null if the successor is not found, not unique, or is the default case. + ConstantInt *findCaseDest(BasicBlock *BB) { + if (BB == getDefaultDest()) return NULL; + + ConstantInt *CI = NULL; + for (unsigned i = 1, e = getNumCases(); i != e; ++i) { + if (getSuccessor(i) == BB) { + if (CI) return NULL; // Multiple cases lead to BB. + else CI = getCaseValue(i); + } + } + return CI; + } + + /// addCase - Add an entry to the switch instruction... + /// + void addCase(ConstantInt *OnVal, BasicBlock *Dest); + + /// removeCase - This method removes the specified successor from the switch + /// instruction. Note that this cannot be used to remove the default + /// destination (successor #0). + /// + void removeCase(unsigned idx); + + virtual SwitchInst *clone() const; + + unsigned getNumSuccessors() const { return getNumOperands()/2; } + BasicBlock *getSuccessor(unsigned idx) const { + assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!"); + return cast<BasicBlock>(getOperand(idx*2+1)); + } + void setSuccessor(unsigned idx, BasicBlock *NewSucc) { + assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); + setOperand(idx*2+1, NewSucc); + } + + // getSuccessorValue - Return the value associated with the specified + // successor. 
+ ConstantInt *getSuccessorValue(unsigned idx) const { + assert(idx < getNumSuccessors() && "Successor # out of range!"); + return reinterpret_cast<ConstantInt*>(getOperand(idx*2)); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SwitchInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Switch; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +private: + virtual BasicBlock *getSuccessorV(unsigned idx) const; + virtual unsigned getNumSuccessorsV() const; + virtual void setSuccessorV(unsigned idx, BasicBlock *B); +}; + +template <> +struct OperandTraits<SwitchInst> : HungoffOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) + + +//===----------------------------------------------------------------------===// +// InvokeInst Class +//===----------------------------------------------------------------------===// + +/// InvokeInst - Invoke instruction. The SubclassData field is used to hold the +/// calling convention of the call. +/// +class InvokeInst : public TerminatorInst { + AttrListPtr AttributeList; + InvokeInst(const InvokeInst &BI); + void init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException, + Value* const *Args, unsigned NumArgs); + + template<typename InputIterator> + void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, + // This argument ensures that we have an iterator we can + // do arithmetic on in constant time + std::random_access_iterator_tag) { + unsigned NumArgs = (unsigned)std::distance(ArgBegin, ArgEnd); + + // This requires that the iterator points to contiguous memory. + init(Func, IfNormal, IfException, NumArgs ? &*ArgBegin : 0, NumArgs); + setName(NameStr); + } + + /// Construct an InvokeInst given a range of arguments. + /// InputIterator must be a random-access iterator pointing to + /// contiguous storage (e.g. a std::vector<>::iterator). Checks are + /// made for random-accessness but not for contiguous storage as + /// that would incur runtime overhead. + /// + /// @brief Construct an InvokeInst from a range of arguments + template<typename InputIterator> + inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + unsigned Values, + const std::string &NameStr, Instruction *InsertBefore); + + /// Construct an InvokeInst given a range of arguments. + /// InputIterator must be a random-access iterator pointing to + /// contiguous storage (e.g. a std::vector<>::iterator). Checks are + /// made for random-accessness but not for contiguous storage as + /// that would incur runtime overhead. 
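+  /// As an illustrative sketch (Callee, NormalBB, UnwindBB and a two-element
+  /// Args array are assumed to exist), client code normally goes through the
+  /// public Create overloads rather than these constructors:
+  ///   InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB,
+  ///                                       Args, Args + 2, "inv", InsertPt);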
+ /// + /// @brief Construct an InvokeInst from a range of arguments + template<typename InputIterator> + inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + unsigned Values, + const std::string &NameStr, BasicBlock *InsertAtEnd); +public: + template<typename InputIterator> + static InvokeInst *Create(Value *Func, + BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr = "", + Instruction *InsertBefore = 0) { + unsigned Values(ArgEnd - ArgBegin + 3); + return new(Values) InvokeInst(Func, IfNormal, IfException, ArgBegin, ArgEnd, + Values, NameStr, InsertBefore); + } + template<typename InputIterator> + static InvokeInst *Create(Value *Func, + BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + const std::string &NameStr, + BasicBlock *InsertAtEnd) { + unsigned Values(ArgEnd - ArgBegin + 3); + return new(Values) InvokeInst(Func, IfNormal, IfException, ArgBegin, ArgEnd, + Values, NameStr, InsertAtEnd); + } + + virtual InvokeInst *clone() const; + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// getCallingConv/setCallingConv - Get or set the calling convention of this + /// function call. + unsigned getCallingConv() const { return SubclassData; } + void setCallingConv(unsigned CC) { + SubclassData = CC; + } + + /// getAttributes - Return the parameter attributes for this invoke. + /// + const AttrListPtr &getAttributes() const { return AttributeList; } + + /// setAttributes - Set the parameter attributes for this invoke. + /// + void setAttributes(const AttrListPtr &Attrs) { AttributeList = Attrs; } + + /// addAttribute - adds the attribute to the list of attributes. + void addAttribute(unsigned i, Attributes attr); + + /// removeAttribute - removes the attribute from the list of attributes. + void removeAttribute(unsigned i, Attributes attr); + + /// @brief Determine whether the call or the callee has the given attribute. + bool paramHasAttr(unsigned i, Attributes attr) const; + + /// @brief Extract the alignment for a call or parameter (0=unknown). + unsigned getParamAlignment(unsigned i) const { + return AttributeList.getParamAlignment(i); + } + + /// @brief Determine if the call does not access memory. + bool doesNotAccessMemory() const { + return paramHasAttr(0, Attribute::ReadNone); + } + void setDoesNotAccessMemory(bool NotAccessMemory = true) { + if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone); + else removeAttribute(~0, Attribute::ReadNone); + } + + /// @brief Determine if the call does not access or only reads memory. + bool onlyReadsMemory() const { + return doesNotAccessMemory() || paramHasAttr(~0, Attribute::ReadOnly); + } + void setOnlyReadsMemory(bool OnlyReadsMemory = true) { + if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly); + else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone); + } + + /// @brief Determine if the call cannot return. + bool doesNotReturn() const { + return paramHasAttr(~0, Attribute::NoReturn); + } + void setDoesNotReturn(bool DoesNotReturn = true) { + if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn); + else removeAttribute(~0, Attribute::NoReturn); + } + + /// @brief Determine if the call cannot unwind. 
+ bool doesNotThrow() const { + return paramHasAttr(~0, Attribute::NoUnwind); + } + void setDoesNotThrow(bool DoesNotThrow = true) { + if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind); + else removeAttribute(~0, Attribute::NoUnwind); + } + + /// @brief Determine if the call returns a structure through first + /// pointer argument. + bool hasStructRetAttr() const { + // Be friendly and also check the callee. + return paramHasAttr(1, Attribute::StructRet); + } + + /// @brief Determine if any call argument is an aggregate passed by value. + bool hasByValArgument() const { + return AttributeList.hasAttrSomewhere(Attribute::ByVal); + } + + /// getCalledFunction - Return the function called, or null if this is an + /// indirect function invocation. + /// + Function *getCalledFunction() const { + return dyn_cast<Function>(getOperand(0)); + } + + /// getCalledValue - Get a pointer to the function that is invoked by this + /// instruction + const Value *getCalledValue() const { return getOperand(0); } + Value *getCalledValue() { return getOperand(0); } + + // get*Dest - Return the destination basic blocks... + BasicBlock *getNormalDest() const { + return cast<BasicBlock>(getOperand(1)); + } + BasicBlock *getUnwindDest() const { + return cast<BasicBlock>(getOperand(2)); + } + void setNormalDest(BasicBlock *B) { + setOperand(1, B); + } + + void setUnwindDest(BasicBlock *B) { + setOperand(2, B); + } + + BasicBlock *getSuccessor(unsigned i) const { + assert(i < 2 && "Successor # out of range for invoke!"); + return i == 0 ? getNormalDest() : getUnwindDest(); + } + + void setSuccessor(unsigned idx, BasicBlock *NewSucc) { + assert(idx < 2 && "Successor # out of range for invoke!"); + setOperand(idx+1, NewSucc); + } + + unsigned getNumSuccessors() const { return 2; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const InvokeInst *) { return true; } + static inline bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::Invoke); + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +private: + virtual BasicBlock *getSuccessorV(unsigned idx) const; + virtual unsigned getNumSuccessorsV() const; + virtual void setSuccessorV(unsigned idx, BasicBlock *B); +}; + +template <> +struct OperandTraits<InvokeInst> : VariadicOperandTraits<3> { +}; + +template<typename InputIterator> +InvokeInst::InvokeInst(Value *Func, + BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + unsigned Values, + const std::string &NameStr, Instruction *InsertBefore) + : TerminatorInst(cast<FunctionType>(cast<PointerType>(Func->getType()) + ->getElementType())->getReturnType(), + Instruction::Invoke, + OperandTraits<InvokeInst>::op_end(this) - Values, + Values, InsertBefore) { + init(Func, IfNormal, IfException, ArgBegin, ArgEnd, NameStr, + typename std::iterator_traits<InputIterator>::iterator_category()); +} +template<typename InputIterator> +InvokeInst::InvokeInst(Value *Func, + BasicBlock *IfNormal, BasicBlock *IfException, + InputIterator ArgBegin, InputIterator ArgEnd, + unsigned Values, + const std::string &NameStr, BasicBlock *InsertAtEnd) + : TerminatorInst(cast<FunctionType>(cast<PointerType>(Func->getType()) + ->getElementType())->getReturnType(), + Instruction::Invoke, + OperandTraits<InvokeInst>::op_end(this) - Values, + Values, InsertAtEnd) { + init(Func, IfNormal, IfException, ArgBegin, ArgEnd, NameStr, + typename 
std::iterator_traits<InputIterator>::iterator_category()); +} + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value) + +//===----------------------------------------------------------------------===// +// UnwindInst Class +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------------- +/// UnwindInst - Immediately exit the current function, unwinding the stack +/// until an invoke instruction is found. +/// +class UnwindInst : public TerminatorInst { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT +public: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } + explicit UnwindInst(Instruction *InsertBefore = 0); + explicit UnwindInst(BasicBlock *InsertAtEnd); + + virtual UnwindInst *clone() const; + + unsigned getNumSuccessors() const { return 0; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const UnwindInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Unwind; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +private: + virtual BasicBlock *getSuccessorV(unsigned idx) const; + virtual unsigned getNumSuccessorsV() const; + virtual void setSuccessorV(unsigned idx, BasicBlock *B); +}; + +//===----------------------------------------------------------------------===// +// UnreachableInst Class +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------------- +/// UnreachableInst - This function has undefined behavior. In particular, the +/// presence of this instruction indicates some higher level knowledge that the +/// end of the block cannot be reached. +/// +class UnreachableInst : public TerminatorInst { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT +public: + // allocate space for exactly zero operands + void *operator new(size_t s) { + return User::operator new(s, 0); + } + explicit UnreachableInst(Instruction *InsertBefore = 0); + explicit UnreachableInst(BasicBlock *InsertAtEnd); + + virtual UnreachableInst *clone() const; + + unsigned getNumSuccessors() const { return 0; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const UnreachableInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::Unreachable; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +private: + virtual BasicBlock *getSuccessorV(unsigned idx) const; + virtual unsigned getNumSuccessorsV() const; + virtual void setSuccessorV(unsigned idx, BasicBlock *B); +}; + +//===----------------------------------------------------------------------===// +// TruncInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a truncation of integer types. 
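+/// (Editorial note, illustrative only.)  Like the other cast classes below, it
+/// is normally built with one of the constructors declared here; for example,
+/// assuming Wide is an i32 Value and InsertPt is an existing Instruction:
+///   TruncInst *Narrow = new TruncInst(Wide, Type::Int8Ty, "narrow", InsertPt);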
+class TruncInst : public CastInst { + /// Private copy constructor + TruncInst(const TruncInst &CI) + : CastInst(CI.getType(), Trunc, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + TruncInst( + Value *S, ///< The value to be truncated + const Type *Ty, ///< The (smaller) type to truncate to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + TruncInst( + Value *S, ///< The value to be truncated + const Type *Ty, ///< The (smaller) type to truncate to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical TruncInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const TruncInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Trunc; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// ZExtInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents zero extension of integer types. +class ZExtInst : public CastInst { + /// @brief Private copy constructor + ZExtInst(const ZExtInst &CI) + : CastInst(CI.getType(), ZExt, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + ZExtInst( + Value *S, ///< The value to be zero extended + const Type *Ty, ///< The type to zero extend to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end semantics. + ZExtInst( + Value *S, ///< The value to be zero extended + const Type *Ty, ///< The type to zero extend to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical ZExtInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const ZExtInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == ZExt; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// SExtInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a sign extension of integer types. 
+class SExtInst : public CastInst { + /// @brief Private copy constructor + SExtInst(const SExtInst &CI) + : CastInst(CI.getType(), SExt, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + SExtInst( + Value *S, ///< The value to be sign extended + const Type *Ty, ///< The type to sign extend to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + SExtInst( + Value *S, ///< The value to be sign extended + const Type *Ty, ///< The type to sign extend to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical SExtInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SExtInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == SExt; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// FPTruncInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a truncation of floating point types. +class FPTruncInst : public CastInst { + FPTruncInst(const FPTruncInst &CI) + : CastInst(CI.getType(), FPTrunc, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + FPTruncInst( + Value *S, ///< The value to be truncated + const Type *Ty, ///< The type to truncate to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-before-instruction semantics + FPTruncInst( + Value *S, ///< The value to be truncated + const Type *Ty, ///< The type to truncate to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical FPTruncInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FPTruncInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == FPTrunc; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// FPExtInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents an extension of floating point types. 
+class FPExtInst : public CastInst { + FPExtInst(const FPExtInst &CI) + : CastInst(CI.getType(), FPExt, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + FPExtInst( + Value *S, ///< The value to be extended + const Type *Ty, ///< The type to extend to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + FPExtInst( + Value *S, ///< The value to be extended + const Type *Ty, ///< The type to extend to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical FPExtInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FPExtInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == FPExt; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// UIToFPInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a cast unsigned integer to floating point. +class UIToFPInst : public CastInst { + UIToFPInst(const UIToFPInst &CI) + : CastInst(CI.getType(), UIToFP, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + UIToFPInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + UIToFPInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical UIToFPInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const UIToFPInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == UIToFP; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// SIToFPInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a cast from signed integer to floating point. 
+class SIToFPInst : public CastInst { + SIToFPInst(const SIToFPInst &CI) + : CastInst(CI.getType(), SIToFP, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + SIToFPInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + SIToFPInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical SIToFPInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SIToFPInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == SIToFP; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// FPToUIInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a cast from floating point to unsigned integer +class FPToUIInst : public CastInst { + FPToUIInst(const FPToUIInst &CI) + : CastInst(CI.getType(), FPToUI, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + FPToUIInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + FPToUIInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< Where to insert the new instruction + ); + + /// @brief Clone an identical FPToUIInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FPToUIInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == FPToUI; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// FPToSIInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a cast from floating point to signed integer. 
+class FPToSIInst : public CastInst { + FPToSIInst(const FPToSIInst &CI) + : CastInst(CI.getType(), FPToSI, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + FPToSIInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + FPToSIInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical FPToSIInst + virtual CastInst *clone() const; + + /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const FPToSIInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == FPToSI; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// IntToPtrInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a cast from an integer to a pointer. +class IntToPtrInst : public CastInst { + IntToPtrInst(const IntToPtrInst &CI) + : CastInst(CI.getType(), IntToPtr, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + IntToPtrInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + IntToPtrInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical IntToPtrInst + virtual CastInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const IntToPtrInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == IntToPtr; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// PtrToIntInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a cast from a pointer to an integer +class PtrToIntInst : public CastInst { + PtrToIntInst(const PtrToIntInst &CI) + : CastInst(CI.getType(), PtrToInt, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + PtrToIntInst( + Value *S, ///< The value to be converted + const Type *Ty, ///< The type to convert to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + PtrToIntInst( + Value *S, ///< The value to be converted + const Type 
*Ty, ///< The type to convert to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical PtrToIntInst + virtual CastInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const PtrToIntInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == PtrToInt; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +//===----------------------------------------------------------------------===// +// BitCastInst Class +//===----------------------------------------------------------------------===// + +/// @brief This class represents a no-op cast from one type to another. +class BitCastInst : public CastInst { + BitCastInst(const BitCastInst &CI) + : CastInst(CI.getType(), BitCast, CI.getOperand(0)) { + } +public: + /// @brief Constructor with insert-before-instruction semantics + BitCastInst( + Value *S, ///< The value to be casted + const Type *Ty, ///< The type to casted to + const std::string &NameStr = "", ///< A name for the new instruction + Instruction *InsertBefore = 0 ///< Where to insert the new instruction + ); + + /// @brief Constructor with insert-at-end-of-block semantics + BitCastInst( + Value *S, ///< The value to be casted + const Type *Ty, ///< The type to casted to + const std::string &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ); + + /// @brief Clone an identical BitCastInst + virtual CastInst *clone() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const BitCastInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == BitCast; + } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) && classof(cast<Instruction>(V)); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/IntrinsicInst.h b/include/llvm/IntrinsicInst.h new file mode 100644 index 0000000..8f5e05f --- /dev/null +++ b/include/llvm/IntrinsicInst.h @@ -0,0 +1,321 @@ +//===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines classes that make it really easy to deal with intrinsic +// functions with the isa/dyncast family of functions. In particular, this +// allows you to do things like: +// +// if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst)) +// ... MCI->getDest() ... MCI->getSource() ... +// +// All intrinsic function calls are instances of the call instruction, so these +// are all subclasses of the CallInst class. Note that none of these classes +// has state or virtual methods, which is an important part of this gross/neat +// hack working. 
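+//
+// (Editorial note, illustrative only.)  The same pattern extends to every
+// wrapper in this file; once a call is known to be an intrinsic, its ID can be
+// switched over directly:
+//
+//   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
+//     switch (II->getIntrinsicID()) {
+//     case Intrinsic::memset: /* ...treat like a store... */ break;
+//     default: break;
+//     }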
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INTRINSICINST_H +#define LLVM_INTRINSICINST_H + +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/Intrinsics.h" + +namespace llvm { + /// IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic + /// functions. This allows the standard isa/dyncast/cast functionality to + /// work with calls to intrinsic functions. + class IntrinsicInst : public CallInst { + IntrinsicInst(); // DO NOT IMPLEMENT + IntrinsicInst(const IntrinsicInst&); // DO NOT IMPLEMENT + void operator=(const IntrinsicInst&); // DO NOT IMPLEMENT + public: + /// getIntrinsicID - Return the intrinsic ID of this intrinsic. + /// + Intrinsic::ID getIntrinsicID() const { + return (Intrinsic::ID)getCalledFunction()->getIntrinsicID(); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const IntrinsicInst *) { return true; } + static inline bool classof(const CallInst *I) { + if (const Function *CF = I->getCalledFunction()) + return CF->getIntrinsicID() != 0; + return false; + } + static inline bool classof(const Value *V) { + return isa<CallInst>(V) && classof(cast<CallInst>(V)); + } + }; + + /// DbgInfoIntrinsic - This is the common base class for debug info intrinsics + /// + struct DbgInfoIntrinsic : public IntrinsicInst { + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DbgInfoIntrinsic *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + switch (I->getIntrinsicID()) { + case Intrinsic::dbg_stoppoint: + case Intrinsic::dbg_func_start: + case Intrinsic::dbg_region_start: + case Intrinsic::dbg_region_end: + case Intrinsic::dbg_declare: + return true; + default: return false; + } + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + + static Value *StripCast(Value *C); + }; + + /// DbgStopPointInst - This represents the llvm.dbg.stoppoint instruction. + /// + struct DbgStopPointInst : public DbgInfoIntrinsic { + Value *getLineValue() const { return const_cast<Value*>(getOperand(1)); } + Value *getColumnValue() const { return const_cast<Value*>(getOperand(2)); } + Value *getContext() const { + return StripCast(getOperand(3)); + } + + unsigned getLine() const { + return unsigned(cast<ConstantInt>(getOperand(1))->getZExtValue()); + } + unsigned getColumn() const { + return unsigned(cast<ConstantInt>(getOperand(2))->getZExtValue()); + } + + Value* getFileName() const; + Value* getDirectory() const; + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DbgStopPointInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::dbg_stoppoint; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// DbgFuncStartInst - This represents the llvm.dbg.func.start instruction. 
+ /// + struct DbgFuncStartInst : public DbgInfoIntrinsic { + Value *getSubprogram() const { return StripCast(getOperand(1)); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DbgFuncStartInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::dbg_func_start; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// DbgRegionStartInst - This represents the llvm.dbg.region.start + /// instruction. + struct DbgRegionStartInst : public DbgInfoIntrinsic { + Value *getContext() const { return StripCast(getOperand(1)); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DbgRegionStartInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::dbg_region_start; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// DbgRegionEndInst - This represents the llvm.dbg.region.end instruction. + /// + struct DbgRegionEndInst : public DbgInfoIntrinsic { + Value *getContext() const { return StripCast(getOperand(1)); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DbgRegionEndInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::dbg_region_end; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// DbgDeclareInst - This represents the llvm.dbg.declare instruction. + /// + struct DbgDeclareInst : public DbgInfoIntrinsic { + Value *getAddress() const { return getOperand(1); } + Value *getVariable() const { return StripCast(getOperand(2)); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const DbgDeclareInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::dbg_declare; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// MemIntrinsic - This is the common base class for memset/memcpy/memmove. + /// + struct MemIntrinsic : public IntrinsicInst { + Value *getRawDest() const { return const_cast<Value*>(getOperand(1)); } + + Value *getLength() const { return const_cast<Value*>(getOperand(3)); } + ConstantInt *getAlignmentCst() const { + return cast<ConstantInt>(const_cast<Value*>(getOperand(4))); + } + + unsigned getAlignment() const { + return getAlignmentCst()->getZExtValue(); + } + + /// getDest - This is just like getRawDest, but it strips off any cast + /// instructions that feed it, giving the original input. The returned + /// value is guaranteed to be a pointer. + Value *getDest() const { return getRawDest()->stripPointerCasts(); } + + /// set* - Set the specified arguments of the instruction. 
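+    /// (Editorial note, illustrative only.)  setDest and setLength below
+    /// assert that the replacement operand has exactly the same type as the
+    /// value it replaces; e.g. for a MemIntrinsic *MI and a pointer NewDst of
+    /// matching type:
+    ///   MI->setDest(NewDst);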
+ /// + void setDest(Value *Ptr) { + assert(getRawDest()->getType() == Ptr->getType() && + "setDest called with pointer of wrong type!"); + setOperand(1, Ptr); + } + + void setLength(Value *L) { + assert(getLength()->getType() == L->getType() && + "setLength called with value of wrong type!"); + setOperand(3, L); + } + void setAlignment(unsigned A) { + const Type *Int32Ty = getOperand(4)->getType(); + setOperand(4, ConstantInt::get(Int32Ty, A)); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MemIntrinsic *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + switch (I->getIntrinsicID()) { + case Intrinsic::memcpy: + case Intrinsic::memmove: + case Intrinsic::memset: + return true; + default: return false; + } + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// MemSetInst - This class wraps the llvm.memset intrinsic. + /// + struct MemSetInst : public MemIntrinsic { + /// get* - Return the arguments to the instruction. + /// + Value *getValue() const { return const_cast<Value*>(getOperand(2)); } + + void setValue(Value *Val) { + assert(getValue()->getType() == Val->getType() && + "setSource called with pointer of wrong type!"); + setOperand(2, Val); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MemSetInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::memset; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics. + /// + struct MemTransferInst : public MemIntrinsic { + /// get* - Return the arguments to the instruction. + /// + Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); } + + /// getSource - This is just like getRawSource, but it strips off any cast + /// instructions that feed it, giving the original input. The returned + /// value is guaranteed to be a pointer. + Value *getSource() const { return getRawSource()->stripPointerCasts(); } + + void setSource(Value *Ptr) { + assert(getRawSource()->getType() == Ptr->getType() && + "setSource called with pointer of wrong type!"); + setOperand(2, Ptr); + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MemTransferInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::memcpy || + I->getIntrinsicID() == Intrinsic::memmove; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + + /// MemCpyInst - This class wraps the llvm.memcpy intrinsic. + /// + struct MemCpyInst : public MemTransferInst { + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MemCpyInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::memcpy; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// MemMoveInst - This class wraps the llvm.memmove intrinsic. 
+ /// + struct MemMoveInst : public MemTransferInst { + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MemMoveInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::memmove; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// EHSelectorInst - This represents the llvm.eh.selector instruction. + /// + struct EHSelectorInst : public IntrinsicInst { + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const EHSelectorInst *) { return true; } + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::eh_selector_i32 || + I->getIntrinsicID() == Intrinsic::eh_selector_i64; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + +} + +#endif diff --git a/include/llvm/Intrinsics.h b/include/llvm/Intrinsics.h new file mode 100644 index 0000000..227eb5a --- /dev/null +++ b/include/llvm/Intrinsics.h @@ -0,0 +1,78 @@ +//===-- llvm/Instrinsics.h - LLVM Intrinsic Function Handling ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a set of enums which allow processing of intrinsic +// functions. Values of these enum types are returned by +// Function::getIntrinsicID. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_INTRINSICS_H +#define LLVM_INTRINSICS_H + +#include <string> + +namespace llvm { + +class Type; +class FunctionType; +class Function; +class Module; +class AttrListPtr; + +/// Intrinsic Namespace - This namespace contains an enum with a value for +/// every intrinsic/builtin function known by LLVM. These enum values are +/// returned by Function::getIntrinsicID(). +/// +namespace Intrinsic { + enum ID { + not_intrinsic = 0, // Must be zero + + // Get the intrinsic enums generated from Intrinsics.td +#define GET_INTRINSIC_ENUM_VALUES +#include "llvm/Intrinsics.gen" +#undef GET_INTRINSIC_ENUM_VALUES + , num_intrinsics + }; + + /// Intrinsic::getName(ID) - Return the LLVM name for an intrinsic, such as + /// "llvm.ppc.altivec.lvx". + std::string getName(ID id, const Type **Tys = 0, unsigned numTys = 0); + + /// Intrinsic::getType(ID) - Return the function type for an intrinsic. + /// + const FunctionType *getType(ID id, const Type **Tys = 0, unsigned numTys = 0); + + /// Intrinsic::isOverloaded(ID) - Returns true if the intrinsic can be + /// overloaded. + bool isOverloaded(ID id); + + /// Intrinsic::getAttributes(ID) - Return the attributes for an intrinsic. + /// + AttrListPtr getAttributes(ID id); + + /// Intrinsic::getDeclaration(M, ID) - Create or insert an LLVM Function + /// declaration for an intrinsic, and return it. + /// + /// The Tys and numTys parameters are for intrinsics with overloaded types + /// (i.e., those using iAny or fAny). For a declaration for an overloaded + /// intrinsic, Tys should point to an array of numTys pointers to Type, + /// and must provide exactly one type for each overloaded type in the + /// intrinsic. 
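+  ///
+  /// (Editorial note, illustrative only.)  For example, the i64 variant of the
+  /// overloaded llvm.memcpy intrinsic could be obtained from a Module *M as:
+  ///   const Type *Tys[1] = { Type::Int64Ty };
+  ///   Function *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy,
+  ///                                                Tys, 1);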
+ Function *getDeclaration(Module *M, ID id, const Type **Tys = 0, + unsigned numTys = 0); + + /// Map a GCC builtin name to an intrinsic ID. + ID getIntrinsicForGCCBuiltin(const char *Prefix, const char *BuiltinName); + +} // End Intrinsic namespace + +} // End llvm namespace + +#endif diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td new file mode 100644 index 0000000..bce3ce0 --- /dev/null +++ b/include/llvm/Intrinsics.td @@ -0,0 +1,455 @@ +//===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines properties of all LLVM intrinsics. +// +//===----------------------------------------------------------------------===// + +include "llvm/CodeGen/ValueTypes.td" + +//===----------------------------------------------------------------------===// +// Properties we keep track of for intrinsics. +//===----------------------------------------------------------------------===// + +class IntrinsicProperty; + +// Intr*Mem - Memory properties. An intrinsic is allowed to have exactly one of +// these properties set. They are listed from the most aggressive (best to use +// if correct) to the least aggressive. If no property is set, the worst case +// is assumed (IntrWriteMem). + +// IntrNoMem - The intrinsic does not access memory or have any other side +// effects. It may be CSE'd deleted if dead, etc. +def IntrNoMem : IntrinsicProperty; + +// IntrReadArgMem - This intrinsic reads only from memory that one of its +// arguments points to, but may read an unspecified amount. +def IntrReadArgMem : IntrinsicProperty; + +// IntrReadMem - This intrinsic reads from unspecified memory, so it cannot be +// moved across stores. However, it can be reordered otherwise and can be +// deleted if dead. +def IntrReadMem : IntrinsicProperty; + +// IntrWriteArgMem - This intrinsic reads and writes only from memory that one +// of its arguments points to, but may access an unspecified amount. The reads +// and writes may be volatile, but except for this it has no other side effects. +def IntrWriteArgMem : IntrinsicProperty; + +// IntrWriteMem - This intrinsic may read or modify unspecified memory or has +// other side effects. It cannot be modified by the optimizer. This is the +// default if the intrinsic has no other Intr*Mem property. +def IntrWriteMem : IntrinsicProperty; + +// Commutative - This intrinsic is commutative: X op Y == Y op X. +def Commutative : IntrinsicProperty; + +// NoCapture - The specified argument pointer is not captured by the intrinsic. +class NoCapture<int argNo> : IntrinsicProperty { + int ArgNo = argNo; +} + +//===----------------------------------------------------------------------===// +// Types used by intrinsics. +//===----------------------------------------------------------------------===// + +class LLVMType<ValueType vt> { + ValueType VT = vt; +} + +class LLVMPointerType<LLVMType elty> + : LLVMType<iPTR>{ + LLVMType ElTy = elty; +} + +class LLVMAnyPointerType<LLVMType elty> + : LLVMType<iPTRAny>{ + LLVMType ElTy = elty; +} + +// Match the type of another intrinsic parameter. Number is an index into the +// list of overloaded types for the intrinsic, excluding all the fixed types. +// The Number value must refer to a previously listed type. 
For example: +// Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyfloat_ty, LLVMMatchType<0>]> +// has two overloaded types, the 2nd and 3rd arguments. LLVMMatchType<0> +// refers to the first overloaded type, which is the 2nd argument. +class LLVMMatchType<int num> + : LLVMType<OtherVT>{ + int Number = num; +} + +// Match the type of another intrinsic parameter that is expected to be +// an integral vector type, but change the element size to be twice as wide +// or half as wide as the other type. This is only useful when the intrinsic +// is overloaded, so the matched type should be declared as iAny. +class LLVMExtendedElementVectorType<int num> : LLVMMatchType<num>; +class LLVMTruncatedElementVectorType<int num> : LLVMMatchType<num>; + +def llvm_void_ty : LLVMType<isVoid>; +def llvm_anyint_ty : LLVMType<iAny>; +def llvm_anyfloat_ty : LLVMType<fAny>; +def llvm_i1_ty : LLVMType<i1>; +def llvm_i8_ty : LLVMType<i8>; +def llvm_i16_ty : LLVMType<i16>; +def llvm_i32_ty : LLVMType<i32>; +def llvm_i64_ty : LLVMType<i64>; +def llvm_float_ty : LLVMType<f32>; +def llvm_double_ty : LLVMType<f64>; +def llvm_f80_ty : LLVMType<f80>; +def llvm_f128_ty : LLVMType<f128>; +def llvm_ppcf128_ty : LLVMType<ppcf128>; +def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>; // i8* +def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>; // i8** +def llvm_anyptr_ty : LLVMAnyPointerType<llvm_i8_ty>; // (space)i8* +def llvm_empty_ty : LLVMType<OtherVT>; // { } +def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }* + +def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8 +def llvm_v8i16_ty : LLVMType<v8i16>; // 8 x i16 +def llvm_v2i64_ty : LLVMType<v2i64>; // 2 x i64 +def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32 +def llvm_v1i64_ty : LLVMType<v1i64>; // 1 x i64 +def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32 +def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float +def llvm_v2f64_ty : LLVMType<v2f64>; // 2 x double + +// MMX Vector Types +def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8 +def llvm_v4i16_ty : LLVMType<v4i16>; // 4 x i16 + +def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here + +//===----------------------------------------------------------------------===// +// Intrinsic Definitions. +//===----------------------------------------------------------------------===// + +// Intrinsic class - This is used to define one LLVM intrinsic. The name of the +// intrinsic definition should start with "int_", then match the LLVM intrinsic +// name with the "llvm." prefix removed, and all "."s turned into "_"s. For +// example, llvm.bswap.i16 -> int_bswap_i16. +// +// * RetTypes is a list containing the return types expected for the +// intrinsic. +// * ParamTypes is a list containing the parameter types expected for the +// intrinsic. +// * Properties can be set to describe the behavior of the intrinsic. +// +class Intrinsic<list<LLVMType> ret_types, + list<LLVMType> param_types = [], + list<IntrinsicProperty> properties = [], + string name = ""> { + string LLVMName = name; + string TargetPrefix = ""; // Set to a prefix for target-specific intrinsics. + list<LLVMType> RetTypes = ret_types; + list<LLVMType> ParamTypes = param_types; + list<IntrinsicProperty> Properties = properties; + + bit isTarget = 0; +} + +/// GCCBuiltin - If this intrinsic exactly corresponds to a GCC builtin, this +/// specifies the name of the builtin. This provides automatic CBE and CFE +/// support. 
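+/// (Editorial note.)  For instance, int_stacksave below carries
+/// GCCBuiltin<"__builtin_stack_save">, allowing the C front-end to map calls
+/// to that builtin directly onto llvm.stacksave.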
+class GCCBuiltin<string name> { + string GCCBuiltinName = name; +} + + +//===--------------- Variable Argument Handling Intrinsics ----------------===// +// + +def int_vastart : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [], "llvm.va_start">; +def int_vacopy : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_ptr_ty], [], + "llvm.va_copy">; +def int_vaend : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [], "llvm.va_end">; + +//===------------------- Garbage Collection Intrinsics --------------------===// +// +def int_gcroot : Intrinsic<[llvm_void_ty], + [llvm_ptrptr_ty, llvm_ptr_ty]>; +def int_gcread : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty, llvm_ptrptr_ty], + [IntrReadArgMem]>; +def int_gcwrite : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty], + [IntrWriteArgMem, NoCapture<1>, NoCapture<2>]>; + +//===--------------------- Code Generator Intrinsics ----------------------===// +// +def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>; +def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>; + +// Note: we treat stacksave/stackrestore as writemem because we don't otherwise +// model their dependencies on allocas. +def int_stacksave : Intrinsic<[llvm_ptr_ty]>, + GCCBuiltin<"__builtin_stack_save">; +def int_stackrestore : Intrinsic<[llvm_void_ty], [llvm_ptr_ty]>, + GCCBuiltin<"__builtin_stack_restore">; + +// IntrWriteArgMem is more pessimistic than strictly necessary for prefetch, +// however it does conveniently prevent the prefetch from being reordered +// with respect to nearby accesses to the same memory. +def int_prefetch : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [IntrWriteArgMem, NoCapture<0>]>; +def int_pcmarker : Intrinsic<[llvm_void_ty], [llvm_i32_ty]>; + +def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>; + +// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack +// guard to the correct place on the stack frame. +def int_stackprotector : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_ptrptr_ty], + [IntrWriteMem]>; + +//===------------------- Standard C Library Intrinsics --------------------===// +// + +def int_memcpy : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_ptr_ty, llvm_anyint_ty, + llvm_i32_ty], + [IntrWriteArgMem, NoCapture<0>, NoCapture<1>]>; +def int_memmove : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_ptr_ty, llvm_anyint_ty, + llvm_i32_ty], + [IntrWriteArgMem, NoCapture<0>, NoCapture<1>]>; +def int_memset : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_i8_ty, llvm_anyint_ty, + llvm_i32_ty], + [IntrWriteArgMem, NoCapture<0>]>; + +// These functions do not actually read memory, but they are sensitive to the +// rounding mode. This needs to be modelled separately; in the meantime +// declaring them as reading memory is conservatively correct. 
+let Properties = [IntrReadMem] in { + def int_sqrt : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_powi : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>; + def int_sin : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_cos : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_pow : Intrinsic<[llvm_anyfloat_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; + def int_log : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_log10: Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_log2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_exp2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; +} + +// NOTE: these are internal interfaces. +def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; +def int_longjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>; +def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>; +def int_siglongjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>; + +//===-------------------- Bit Manipulation Intrinsics ---------------------===// +// + +// None of these intrinsics accesses memory at all. +let Properties = [IntrNoMem] in { + def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; + def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; + def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; + def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; + def int_part_select : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty]>; + def int_part_set : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyint_ty, + llvm_i32_ty, llvm_i32_ty]>; +} + +//===------------------------ Debugger Intrinsics -------------------------===// +// + +// None of these intrinsics accesses memory at all...but that doesn't mean the +// optimizers can change them aggressively. Special handling needed in a few +// places. 
+let Properties = [IntrNoMem] in { + def int_dbg_stoppoint : Intrinsic<[llvm_void_ty], + [llvm_i32_ty, llvm_i32_ty, + llvm_descriptor_ty]>; + def int_dbg_region_start : Intrinsic<[llvm_void_ty], [llvm_descriptor_ty]>; + def int_dbg_region_end : Intrinsic<[llvm_void_ty], [llvm_descriptor_ty]>; + def int_dbg_func_start : Intrinsic<[llvm_void_ty], [llvm_descriptor_ty]>; + def int_dbg_declare : Intrinsic<[llvm_void_ty], + [llvm_descriptor_ty, llvm_descriptor_ty]>; +} + +//===------------------ Exception Handling Intrinsics----------------------===// +// +def int_eh_exception : Intrinsic<[llvm_ptr_ty]>; +def int_eh_selector_i32 : Intrinsic<[llvm_i32_ty], + [llvm_ptr_ty, llvm_ptr_ty, llvm_vararg_ty]>; +def int_eh_selector_i64 : Intrinsic<[llvm_i64_ty], + [llvm_ptr_ty, llvm_ptr_ty, llvm_vararg_ty]>; + +def int_eh_typeid_for_i32 : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; +def int_eh_typeid_for_i64 : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty]>; + +def int_eh_return_i32 : Intrinsic<[llvm_void_ty], [llvm_i32_ty, llvm_ptr_ty]>; +def int_eh_return_i64 : Intrinsic<[llvm_void_ty], [llvm_i64_ty, llvm_ptr_ty]>; + +def int_eh_unwind_init: Intrinsic<[llvm_void_ty]>, + GCCBuiltin<"__builtin_unwind_init">; + +def int_eh_dwarf_cfa : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>; + +let Properties = [IntrNoMem] in { +def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; +def int_eh_sjlj_longjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>; +} + +//===---------------- Generic Variable Attribute Intrinsics----------------===// +// +def int_var_annotation : Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_ptr_ty, + llvm_ptr_ty, llvm_i32_ty], + [], "llvm.var.annotation">; +def int_ptr_annotation : Intrinsic<[LLVMAnyPointerType<llvm_anyint_ty>], + [LLVMMatchType<0>, llvm_ptr_ty, llvm_ptr_ty, + llvm_i32_ty], + [], "llvm.ptr.annotation">; +def int_annotation : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, llvm_ptr_ty, + llvm_ptr_ty, llvm_i32_ty], + [], "llvm.annotation">; + +//===------------------------ Trampoline Intrinsics -----------------------===// +// +def int_init_trampoline : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty], + [IntrWriteArgMem]>, + GCCBuiltin<"__builtin_init_trampoline">; + +//===------------------------ Overflow Intrinsics -------------------------===// +// + +// Expose the carry flag from add operations on two integrals. 
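+// (Editorial note, illustrative only.)  Each definition returns a two-element
+// aggregate {iN, i1}: the iN element is the wrapped arithmetic result and the
+// i1 element is the carry/overflow bit.  Given a CallInst *Call to one of
+// these intrinsics, the pieces are typically split out with ExtractValueInst:
+//   Value *Sum = ExtractValueInst::Create(Call, 0, "sum", InsertPt);
+//   Value *Ovf = ExtractValueInst::Create(Call, 1, "ovf", InsertPt);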
+def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; +def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; + +def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; +def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; + +def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; +def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [LLVMMatchType<0>, LLVMMatchType<0>]>; + +//===------------------------- Atomic Intrinsics --------------------------===// +// +def int_memory_barrier : Intrinsic<[llvm_void_ty], + [llvm_i1_ty, llvm_i1_ty, + llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>, + GCCBuiltin<"__builtin_llvm_memory_barrier">; + +def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>, LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_val_compare_and_swap">; +def int_atomic_load_add : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_add">; +def int_atomic_swap : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_lock_test_and_set">; +def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_sub">; +def int_atomic_load_and : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_and">; +def int_atomic_load_or : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_or">; +def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_xor">; +def int_atomic_load_nand : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_nand">; +def int_atomic_load_min : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_min">; +def int_atomic_load_max : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_max">; +def int_atomic_load_umin : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_umin">; +def int_atomic_load_umax : Intrinsic<[llvm_anyint_ty], + [LLVMAnyPointerType<LLVMMatchType<0>>, + LLVMMatchType<0>], + [IntrWriteArgMem, NoCapture<0>]>, + GCCBuiltin<"__sync_fetch_and_umax">; + +//===-------------------------- Other Intrinsics --------------------------===// +// +def int_flt_rounds : Intrinsic<[llvm_i32_ty]>, + GCCBuiltin<"__builtin_flt_rounds">; +def int_trap : Intrinsic<[llvm_void_ty]>, + GCCBuiltin<"__builtin_trap">; + +// These convert intrinsics are to support various 
conversions between +// various types with rounding and saturation. NOTE: avoid using these +// intrinsics as they might be removed sometime in the future and +// most targets don't support them. +def int_convertff : Intrinsic<[llvm_anyfloat_ty], + [llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertfsi : Intrinsic<[llvm_anyfloat_ty], + [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertfui : Intrinsic<[llvm_anyfloat_ty], + [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertsif : Intrinsic<[llvm_anyint_ty], + [llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertuif : Intrinsic<[llvm_anyint_ty], + [llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertss : Intrinsic<[llvm_anyint_ty], + [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertsu : Intrinsic<[llvm_anyint_ty], + [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertus : Intrinsic<[llvm_anyint_ty], + [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>; +def int_convertuu : Intrinsic<[llvm_anyint_ty], + [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>; + +//===----------------------------------------------------------------------===// +// Target-specific intrinsics +//===----------------------------------------------------------------------===// + +include "llvm/IntrinsicsPowerPC.td" +include "llvm/IntrinsicsX86.td" +include "llvm/IntrinsicsARM.td" +include "llvm/IntrinsicsCellSPU.td" +include "llvm/IntrinsicsAlpha.td" +include "llvm/IntrinsicsXCore.td" diff --git a/include/llvm/IntrinsicsARM.td b/include/llvm/IntrinsicsARM.td new file mode 100644 index 0000000..e574938 --- /dev/null +++ b/include/llvm/IntrinsicsARM.td @@ -0,0 +1,21 @@ +//===- IntrinsicsARM.td - Defines ARM intrinsics -----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the ARM-specific intrinsics. +// +//===----------------------------------------------------------------------===// + + +//===----------------------------------------------------------------------===// +// TLS + +let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". + def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">, + Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; +} diff --git a/include/llvm/IntrinsicsAlpha.td b/include/llvm/IntrinsicsAlpha.td new file mode 100644 index 0000000..59865cf --- /dev/null +++ b/include/llvm/IntrinsicsAlpha.td @@ -0,0 +1,18 @@ +//===- IntrinsicsAlpha.td - Defines Alpha intrinsics -------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the Alpha-specific intrinsics. +// +//===----------------------------------------------------------------------===// + + +let TargetPrefix = "alpha" in { // All intrinsics start with "llvm.alpha.". 
+ def int_alpha_umulh : GCCBuiltin<"__builtin_alpha_umulh">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>; +} diff --git a/include/llvm/IntrinsicsCellSPU.td b/include/llvm/IntrinsicsCellSPU.td new file mode 100644 index 0000000..1e311bb --- /dev/null +++ b/include/llvm/IntrinsicsCellSPU.td @@ -0,0 +1,242 @@ +//==- IntrinsicsCellSPU.td - Cell SDK intrinsics -*- tablegen -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// Department at The Aerospace Corporation and is distributed under the +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// Cell SPU Instructions: +//===----------------------------------------------------------------------===// +// TODO Items (not urgent today, but would be nice, low priority) +// +// ANDBI, ORBI: SPU constructs a 4-byte constant for these instructions by +// concatenating the byte argument b as "bbbb". Could recognize this bit pattern +// in 16-bit and 32-bit constants and reduce instruction count. +//===----------------------------------------------------------------------===// + +// 7-bit integer type, used as an immediate: +def cell_i7_ty: LLVMType<i8>; +def cell_i8_ty: LLVMType<i8>; + +// Keep this here until it's actually supported: +def llvm_i128_ty : LLVMType<i128>; + +class v16i8_u7imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, cell_i7_ty], + [IntrNoMem]>; + +class v16i8_u8imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i8_ty], + [IntrNoMem]>; + +class v16i8_s10imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i16_ty], + [IntrNoMem]>; + +class v16i8_u16imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i16_ty], + [IntrNoMem]>; + +class v16i8_rr<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + +class v8i16_s10imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i16_ty], + [IntrNoMem]>; + +class v8i16_u16imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i16_ty], + [IntrNoMem]>; + +class v8i16_rr<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + +class v4i32_rr<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + +class v4i32_u7imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, cell_i7_ty], + [IntrNoMem]>; + +class v4i32_s10imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i16_ty], + [IntrNoMem]>; + +class v4i32_u16imm<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i16_ty], + 
[IntrNoMem]>; + +class v4f32_rr<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + +class v4f32_rrr<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + +class v2f64_rr<string builtin_suffix> : + GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], + [IntrNoMem]>; + +// All Cell SPU intrinsics start with "llvm.spu.". +let TargetPrefix = "spu" in { + def int_spu_si_fsmbi : v8i16_u16imm<"fsmbi">; + def int_spu_si_ah : v8i16_rr<"ah">; + def int_spu_si_ahi : v8i16_s10imm<"ahi">; + def int_spu_si_a : v4i32_rr<"a">; + def int_spu_si_ai : v4i32_s10imm<"ai">; + def int_spu_si_sfh : v8i16_rr<"sfh">; + def int_spu_si_sfhi : v8i16_s10imm<"sfhi">; + def int_spu_si_sf : v4i32_rr<"sf">; + def int_spu_si_sfi : v4i32_s10imm<"sfi">; + def int_spu_si_addx : v4i32_rr<"addx">; + def int_spu_si_cg : v4i32_rr<"cg">; + def int_spu_si_cgx : v4i32_rr<"cgx">; + def int_spu_si_sfx : v4i32_rr<"sfx">; + def int_spu_si_bg : v4i32_rr<"bg">; + def int_spu_si_bgx : v4i32_rr<"bgx">; + def int_spu_si_mpy : // This is special: + GCCBuiltin<"__builtin_si_mpy">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyu : // This is special: + GCCBuiltin<"__builtin_si_mpyu">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyi : // This is special: + GCCBuiltin<"__builtin_si_mpyi">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyui : // This is special: + GCCBuiltin<"__builtin_si_mpyui">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_i16_ty], + [IntrNoMem]>; + def int_spu_si_mpya : // This is special: + GCCBuiltin<"__builtin_si_mpya">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyh : // This is special: + GCCBuiltin<"__builtin_si_mpyh">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpys : // This is special: + GCCBuiltin<"__builtin_si_mpys">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyhh : // This is special: + GCCBuiltin<"__builtin_si_mpyhh">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyhha : // This is special: + GCCBuiltin<"__builtin_si_mpyhha">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyhhu : // This is special: + GCCBuiltin<"__builtin_si_mpyhhu">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_spu_si_mpyhhau : // This is special: + GCCBuiltin<"__builtin_si_mpyhhau">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + + def int_spu_si_shli: v4i32_u7imm<"shli">; + + def int_spu_si_shlqbi: + GCCBuiltin<!strconcat("__builtin_si_", "shlqbi")>, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], + [IntrNoMem]>; + + def int_spu_si_shlqbii: v16i8_u7imm<"shlqbii">; + def int_spu_si_shlqby: + GCCBuiltin<!strconcat("__builtin_si_", "shlqby")>, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_spu_si_shlqbyi: v16i8_u7imm<"shlqbyi">; + + def int_spu_si_ceq: v4i32_rr<"ceq">; + def int_spu_si_ceqi: v4i32_s10imm<"ceqi">; + 
def int_spu_si_ceqb: v16i8_rr<"ceqb">; + def int_spu_si_ceqbi: v16i8_u8imm<"ceqbi">; + def int_spu_si_ceqh: v8i16_rr<"ceqh">; + def int_spu_si_ceqhi: v8i16_s10imm<"ceqhi">; + def int_spu_si_cgt: v4i32_rr<"cgt">; + def int_spu_si_cgti: v4i32_s10imm<"cgti">; + def int_spu_si_cgtb: v16i8_rr<"cgtb">; + def int_spu_si_cgtbi: v16i8_u8imm<"cgtbi">; + def int_spu_si_cgth: v8i16_rr<"cgth">; + def int_spu_si_cgthi: v8i16_s10imm<"cgthi">; + def int_spu_si_clgtb: v16i8_rr<"clgtb">; + def int_spu_si_clgtbi: v16i8_u8imm<"clgtbi">; + def int_spu_si_clgth: v8i16_rr<"clgth">; + def int_spu_si_clgthi: v8i16_s10imm<"clgthi">; + def int_spu_si_clgt: v4i32_rr<"clgt">; + def int_spu_si_clgti: v4i32_s10imm<"clgti">; + + def int_spu_si_and: v4i32_rr<"and">; + def int_spu_si_andbi: v16i8_u8imm<"andbi">; + def int_spu_si_andc: v4i32_rr<"andc">; + def int_spu_si_andhi: v8i16_s10imm<"andhi">; + def int_spu_si_andi: v4i32_s10imm<"andi">; + + def int_spu_si_or: v4i32_rr<"or">; + def int_spu_si_orbi: v16i8_u8imm<"orbi">; + def int_spu_si_orc: v4i32_rr<"orc">; + def int_spu_si_orhi: v8i16_s10imm<"orhi">; + def int_spu_si_ori: v4i32_s10imm<"ori">; + + def int_spu_si_xor: v4i32_rr<"xor">; + def int_spu_si_xorbi: v16i8_u8imm<"xorbi">; + def int_spu_si_xorhi: v8i16_s10imm<"xorhi">; + def int_spu_si_xori: v4i32_s10imm<"xori">; + + def int_spu_si_nor: v4i32_rr<"nor">; + def int_spu_si_nand: v4i32_rr<"nand">; + + def int_spu_si_fa: v4f32_rr<"fa">; + def int_spu_si_fs: v4f32_rr<"fs">; + def int_spu_si_fm: v4f32_rr<"fm">; + + def int_spu_si_fceq: v4f32_rr<"fceq">; + def int_spu_si_fcmeq: v4f32_rr<"fcmeq">; + def int_spu_si_fcgt: v4f32_rr<"fcgt">; + def int_spu_si_fcmgt: v4f32_rr<"fcmgt">; + + def int_spu_si_fma: v4f32_rrr<"fma">; + def int_spu_si_fnms: v4f32_rrr<"fnms">; + def int_spu_si_fms: v4f32_rrr<"fms">; + + def int_spu_si_dfa: v2f64_rr<"dfa">; + def int_spu_si_dfs: v2f64_rr<"dfs">; + def int_spu_si_dfm: v2f64_rr<"dfm">; + +//def int_spu_si_dfceq: v2f64_rr<"dfceq">; +//def int_spu_si_dfcmeq: v2f64_rr<"dfcmeq">; +//def int_spu_si_dfcgt: v2f64_rr<"dfcgt">; +//def int_spu_si_dfcmgt: v2f64_rr<"dfcmgt">; + + def int_spu_si_dfnma: v2f64_rr<"dfnma">; + def int_spu_si_dfma: v2f64_rr<"dfma">; + def int_spu_si_dfnms: v2f64_rr<"dfnms">; + def int_spu_si_dfms: v2f64_rr<"dfms">; +} diff --git a/include/llvm/IntrinsicsPowerPC.td b/include/llvm/IntrinsicsPowerPC.td new file mode 100644 index 0000000..ffb870d --- /dev/null +++ b/include/llvm/IntrinsicsPowerPC.td @@ -0,0 +1,470 @@ +//===- IntrinsicsPowerPC.td - Defines PowerPC intrinsics ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the PowerPC-specific intrinsics. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Definitions for all PowerPC intrinsics. +// + +// Non-altivec intrinsics. +let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.". + // dcba/dcbf/dcbi/dcbst/dcbt/dcbz/dcbzl(PPC970) instructions. 
+  def int_ppc_dcba  : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbf  : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbi  : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbst : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbt  : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbtst: Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbz  : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+  def int_ppc_dcbzl : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+
+  // sync instruction
+  def int_ppc_sync : Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+}
+
+
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  /// PowerPC_Vec_Intrinsic - Base class for all altivec intrinsics.
+  class PowerPC_Vec_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+                              list<LLVMType> param_types,
+                              list<IntrinsicProperty> properties>
+    : GCCBuiltin<!strconcat("__builtin_altivec_", GCCIntSuffix)>,
+      Intrinsic<ret_types, param_types, properties>;
+}
+
+//===----------------------------------------------------------------------===//
+// PowerPC Altivec Intrinsic Class Definitions.
+//
+
+/// PowerPC_Vec_FF_Intrinsic - A PowerPC intrinsic that takes one v4f32
+/// vector and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_FF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+/// PowerPC_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_FFF_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_BBB_Intrinsic - A PowerPC intrinsic that takes two v16i8
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_HHH_Intrinsic - A PowerPC intrinsic that takes two v8i16
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                          [IntrNoMem]>;
+
+/// PowerPC_Vec_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32
+/// vectors and returns one.  These intrinsics have no side effects.
+class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
+  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+                          [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                          [IntrNoMem]>;
+
+
+//===----------------------------------------------------------------------===//
+// PowerPC Altivec Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
+  // Data Stream Control.
+ def int_ppc_altivec_dss : GCCBuiltin<"__builtin_altivec_dss">, + Intrinsic<[llvm_void_ty], [llvm_i32_ty], [IntrWriteMem]>; + def int_ppc_altivec_dssall : GCCBuiltin<"__builtin_altivec_dssall">, + Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + def int_ppc_altivec_dst : GCCBuiltin<"__builtin_altivec_dst">, + Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [IntrWriteMem]>; + def int_ppc_altivec_dstt : GCCBuiltin<"__builtin_altivec_dstt">, + Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [IntrWriteMem]>; + def int_ppc_altivec_dstst : GCCBuiltin<"__builtin_altivec_dstst">, + Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [IntrWriteMem]>; + def int_ppc_altivec_dststt : GCCBuiltin<"__builtin_altivec_dststt">, + Intrinsic<[llvm_void_ty], + [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [IntrWriteMem]>; + + // VSCR access. + def int_ppc_altivec_mfvscr : GCCBuiltin<"__builtin_altivec_mfvscr">, + Intrinsic<[llvm_v8i16_ty], [], [IntrReadMem]>; + def int_ppc_altivec_mtvscr : GCCBuiltin<"__builtin_altivec_mtvscr">, + Intrinsic<[llvm_void_ty], [llvm_v4i32_ty], [IntrWriteMem]>; + + + // Loads. These don't map directly to GCC builtins because they represent the + // source address with a single pointer. + def int_ppc_altivec_lvx : + Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem]>; + def int_ppc_altivec_lvxl : + Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem]>; + def int_ppc_altivec_lvebx : + Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem]>; + def int_ppc_altivec_lvehx : + Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty], [IntrReadMem]>; + def int_ppc_altivec_lvewx : + Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem]>; + + // Stores. These don't map directly to GCC builtins because they represent the + // source address with a single pointer. + def int_ppc_altivec_stvx : + Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty], + [IntrWriteMem]>; + def int_ppc_altivec_stvxl : + Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty], + [IntrWriteMem]>; + def int_ppc_altivec_stvebx : + Intrinsic<[llvm_void_ty], [llvm_v16i8_ty, llvm_ptr_ty], + [IntrWriteMem]>; + def int_ppc_altivec_stvehx : + Intrinsic<[llvm_void_ty], [llvm_v8i16_ty, llvm_ptr_ty], + [IntrWriteMem]>; + def int_ppc_altivec_stvewx : + Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty], + [IntrWriteMem]>; + + // Comparisons setting a vector. 
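As a rough illustration of what the comparison intrinsics declared below compute, here is a small C sketch using the standard AltiVec interface from <altivec.h>; the vec_* calls are assumptions about how front ends usually reach these operations, not part of this patch, and it needs a PowerPC compiler with AltiVec enabled (e.g. -maltivec). vec_cmpgt yields the per-element all-ones/all-zeros mask that vcmpgtfp produces, and that mask can drive a select:

    #include <altivec.h>

    /* Clamp x to an upper limit, element by element. */
    vector float clamp_to_limit(vector float x, vector float limit) {
        /* mask element i is all ones where x[i] > limit[i] (a vcmpgtfp-style result) */
        vector bool int mask = vec_cmpgt(x, limit);
        /* take limit where the mask is set, x elsewhere */
        return vec_sel(x, limit, mask);
    }
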
+ def int_ppc_altivec_vcmpbfp : GCCBuiltin<"__builtin_altivec_vcmpbfp">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpeqfp : GCCBuiltin<"__builtin_altivec_vcmpeqfp">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgefp : GCCBuiltin<"__builtin_altivec_vcmpgefp">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtfp : GCCBuiltin<"__builtin_altivec_vcmpgtfp">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vcmpequw : GCCBuiltin<"__builtin_altivec_vcmpequw">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsw : GCCBuiltin<"__builtin_altivec_vcmpgtsw">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtuw : GCCBuiltin<"__builtin_altivec_vcmpgtuw">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vcmpequh : GCCBuiltin<"__builtin_altivec_vcmpequh">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsh : GCCBuiltin<"__builtin_altivec_vcmpgtsh">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtuh : GCCBuiltin<"__builtin_altivec_vcmpgtuh">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vcmpequb : GCCBuiltin<"__builtin_altivec_vcmpequb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsb : GCCBuiltin<"__builtin_altivec_vcmpgtsb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtub : GCCBuiltin<"__builtin_altivec_vcmpgtub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + + // Predicate Comparisons. The first operand specifies interpretation of CR6. 
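A short, hedged C sketch of how the predicate forms are usually reached: the vec_all_* and vec_any_* functions in <altivec.h> return a scalar derived from CR6 rather than a mask vector, which corresponds to the extra llvm_i32_ty operand in the declarations below. This again assumes an AltiVec-enabled PowerPC compiler; the helper names are the standard AltiVec C API, not something introduced by this patch:

    #include <altivec.h>

    /* Nonzero iff every element of x is strictly positive. */
    int all_positive(vector float x) {
        vector float zero = (vector float){0.0f, 0.0f, 0.0f, 0.0f};
        return vec_all_gt(x, zero);
    }

    /* Nonzero iff at least one pair of corresponding elements is equal. */
    int any_equal(vector signed int a, vector signed int b) {
        return vec_any_eq(a, b);
    }
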
+ def int_ppc_altivec_vcmpbfp_p : GCCBuiltin<"__builtin_altivec_vcmpbfp_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpeqfp_p : GCCBuiltin<"__builtin_altivec_vcmpeqfp_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgefp_p : GCCBuiltin<"__builtin_altivec_vcmpgefp_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtfp_p : GCCBuiltin<"__builtin_altivec_vcmpgtfp_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vcmpequw_p : GCCBuiltin<"__builtin_altivec_vcmpequw_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsw_p : GCCBuiltin<"__builtin_altivec_vcmpgtsw_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtuw_p : GCCBuiltin<"__builtin_altivec_vcmpgtuw_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vcmpequh_p : GCCBuiltin<"__builtin_altivec_vcmpequh_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsh_p : GCCBuiltin<"__builtin_altivec_vcmpgtsh_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtuh_p : GCCBuiltin<"__builtin_altivec_vcmpgtuh_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vcmpequb_p : GCCBuiltin<"__builtin_altivec_vcmpequb_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsb_p : GCCBuiltin<"__builtin_altivec_vcmpgtsb_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtub_p : GCCBuiltin<"__builtin_altivec_vcmpgtub_p">, + Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty], + [IntrNoMem]>; +} + +// Vector average. +def int_ppc_altivec_vavgsb : PowerPC_Vec_BBB_Intrinsic<"vavgsb">; +def int_ppc_altivec_vavgsh : PowerPC_Vec_HHH_Intrinsic<"vavgsh">; +def int_ppc_altivec_vavgsw : PowerPC_Vec_WWW_Intrinsic<"vavgsw">; +def int_ppc_altivec_vavgub : PowerPC_Vec_BBB_Intrinsic<"vavgub">; +def int_ppc_altivec_vavguh : PowerPC_Vec_HHH_Intrinsic<"vavguh">; +def int_ppc_altivec_vavguw : PowerPC_Vec_WWW_Intrinsic<"vavguw">; + +// Vector maximum. +def int_ppc_altivec_vmaxfp : PowerPC_Vec_FFF_Intrinsic<"vmaxfp">; +def int_ppc_altivec_vmaxsb : PowerPC_Vec_BBB_Intrinsic<"vmaxsb">; +def int_ppc_altivec_vmaxsh : PowerPC_Vec_HHH_Intrinsic<"vmaxsh">; +def int_ppc_altivec_vmaxsw : PowerPC_Vec_WWW_Intrinsic<"vmaxsw">; +def int_ppc_altivec_vmaxub : PowerPC_Vec_BBB_Intrinsic<"vmaxub">; +def int_ppc_altivec_vmaxuh : PowerPC_Vec_HHH_Intrinsic<"vmaxuh">; +def int_ppc_altivec_vmaxuw : PowerPC_Vec_WWW_Intrinsic<"vmaxuw">; + +// Vector minimum. 
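For orientation, a minimal C sketch of the min/max family in use, assuming <altivec.h> and an AltiVec-enabled PowerPC target; vec_min and vec_max are the usual C spellings of the operations defined around this point, not part of this header set:

    #include <altivec.h>

    /* Element-wise clamp of v into [lo, hi]. */
    vector signed short clamp(vector signed short v,
                              vector signed short lo,
                              vector signed short hi) {
        return vec_min(vec_max(v, lo), hi);
    }
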
+def int_ppc_altivec_vminfp : PowerPC_Vec_FFF_Intrinsic<"vminfp">;
+def int_ppc_altivec_vminsb : PowerPC_Vec_BBB_Intrinsic<"vminsb">;
+def int_ppc_altivec_vminsh : PowerPC_Vec_HHH_Intrinsic<"vminsh">;
+def int_ppc_altivec_vminsw : PowerPC_Vec_WWW_Intrinsic<"vminsw">;
+def int_ppc_altivec_vminub : PowerPC_Vec_BBB_Intrinsic<"vminub">;
+def int_ppc_altivec_vminuh : PowerPC_Vec_HHH_Intrinsic<"vminuh">;
+def int_ppc_altivec_vminuw : PowerPC_Vec_WWW_Intrinsic<"vminuw">;
+
+// Saturating adds.
+def int_ppc_altivec_vaddubs : PowerPC_Vec_BBB_Intrinsic<"vaddubs">;
+def int_ppc_altivec_vaddsbs : PowerPC_Vec_BBB_Intrinsic<"vaddsbs">;
+def int_ppc_altivec_vadduhs : PowerPC_Vec_HHH_Intrinsic<"vadduhs">;
+def int_ppc_altivec_vaddshs : PowerPC_Vec_HHH_Intrinsic<"vaddshs">;
+def int_ppc_altivec_vadduws : PowerPC_Vec_WWW_Intrinsic<"vadduws">;
+def int_ppc_altivec_vaddsws : PowerPC_Vec_WWW_Intrinsic<"vaddsws">;
+def int_ppc_altivec_vaddcuw : PowerPC_Vec_WWW_Intrinsic<"vaddcuw">;
+
+// Saturating subs.
+def int_ppc_altivec_vsububs : PowerPC_Vec_BBB_Intrinsic<"vsububs">;
+def int_ppc_altivec_vsubsbs : PowerPC_Vec_BBB_Intrinsic<"vsubsbs">;
+def int_ppc_altivec_vsubuhs : PowerPC_Vec_HHH_Intrinsic<"vsubuhs">;
+def int_ppc_altivec_vsubshs : PowerPC_Vec_HHH_Intrinsic<"vsubshs">;
+def int_ppc_altivec_vsubuws : PowerPC_Vec_WWW_Intrinsic<"vsubuws">;
+def int_ppc_altivec_vsubsws : PowerPC_Vec_WWW_Intrinsic<"vsubsws">;
+def int_ppc_altivec_vsubcuw : PowerPC_Vec_WWW_Intrinsic<"vsubcuw">;
+
+let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
+  // Saturating multiply-adds.
+  def int_ppc_altivec_vmhaddshs : GCCBuiltin<"__builtin_altivec_vmhaddshs">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                       llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmhraddshs : GCCBuiltin<"__builtin_altivec_vmhraddshs">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+                       llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+
+  def int_ppc_altivec_vmaddfp : GCCBuiltin<"__builtin_altivec_vmaddfp">,
+            Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                       llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vnmsubfp : GCCBuiltin<"__builtin_altivec_vnmsubfp">,
+            Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+                       llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+
+  // Vector Multiply Sum Instructions.
+  def int_ppc_altivec_vmsummbm : GCCBuiltin<"__builtin_altivec_vmsummbm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumshm : GCCBuiltin<"__builtin_altivec_vmsumshm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumshs : GCCBuiltin<"__builtin_altivec_vmsumshs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumubm : GCCBuiltin<"__builtin_altivec_vmsumubm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumuhm : GCCBuiltin<"__builtin_altivec_vmsumuhm">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+  def int_ppc_altivec_vmsumuhs : GCCBuiltin<"__builtin_altivec_vmsumuhs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v4i32_ty], [IntrNoMem]>;
+
+  // Vector Multiply Instructions.
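A hedged C sketch of the even/odd widening multiplies declared below: vec_mule and vec_mulo (standard <altivec.h> names, assumed here rather than defined by this patch) multiply the even- or odd-numbered elements and return a vector of twice the element width, so the two results together cover every input lane:

    #include <altivec.h>

    /* result[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1], as 32-bit sums. */
    vector signed int pairwise_products(vector signed short a,
                                        vector signed short b) {
        vector signed int even = vec_mule(a, b);   /* lanes 0, 2, 4, 6 */
        vector signed int odd  = vec_mulo(a, b);   /* lanes 1, 3, 5, 7 */
        return vec_add(even, odd);
    }
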
+  def int_ppc_altivec_vmulesb : GCCBuiltin<"__builtin_altivec_vmulesb">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vmulesh : GCCBuiltin<"__builtin_altivec_vmulesh">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vmuleub : GCCBuiltin<"__builtin_altivec_vmuleub">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vmuleuh : GCCBuiltin<"__builtin_altivec_vmuleuh">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+
+  def int_ppc_altivec_vmulosb : GCCBuiltin<"__builtin_altivec_vmulosb">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vmulosh : GCCBuiltin<"__builtin_altivec_vmulosh">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vmuloub : GCCBuiltin<"__builtin_altivec_vmuloub">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vmulouh : GCCBuiltin<"__builtin_altivec_vmulouh">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+
+  // Vector Sum Instructions.
+  def int_ppc_altivec_vsumsws : GCCBuiltin<"__builtin_altivec_vsumsws">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum2sws : GCCBuiltin<"__builtin_altivec_vsum2sws">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum4sbs : GCCBuiltin<"__builtin_altivec_vsum4sbs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum4shs : GCCBuiltin<"__builtin_altivec_vsum4shs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vsum4ubs : GCCBuiltin<"__builtin_altivec_vsum4ubs">,
+            Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+
+  // Other multiplies.
+  def int_ppc_altivec_vmladduhm : GCCBuiltin<"__builtin_altivec_vmladduhm">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+                       llvm_v8i16_ty], [IntrNoMem]>;
+
+  // Packs.
+  def int_ppc_altivec_vpkpx : GCCBuiltin<"__builtin_altivec_vpkpx">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkshss : GCCBuiltin<"__builtin_altivec_vpkshss">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkshus : GCCBuiltin<"__builtin_altivec_vpkshus">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkswss : GCCBuiltin<"__builtin_altivec_vpkswss">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  def int_ppc_altivec_vpkswus : GCCBuiltin<"__builtin_altivec_vpkswus">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+  // vpkuhum is lowered to a shuffle.
+  def int_ppc_altivec_vpkuhus : GCCBuiltin<"__builtin_altivec_vpkuhus">,
+            Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+                      [IntrNoMem]>;
+  // vpkuwum is lowered to a shuffle.
+  def int_ppc_altivec_vpkuwus : GCCBuiltin<"__builtin_altivec_vpkuwus">,
+            Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+                      [IntrNoMem]>;
+
+  // Unpacks.
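To make the pack/unpack pairing concrete, a small C sketch (standard <altivec.h> interface assumed, not part of this patch, on an AltiVec-enabled PowerPC target): vec_packs narrows two vectors with saturation, as vpkshss above does, and vec_unpackh sign-extends one half back to the wider element type, as the vupk declarations below do:

    #include <altivec.h>

    /* Two 8 x i16 vectors -> one 16 x i8 vector, with signed saturation. */
    vector signed char narrow_with_saturation(vector signed short hi,
                                              vector signed short lo) {
        return vec_packs(hi, lo);
    }

    /* Sign-extend the high eight bytes back to 8 x i16. */
    vector signed short widen_high_half(vector signed char v) {
        return vec_unpackh(v);
    }
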
+ def int_ppc_altivec_vupkhpx : GCCBuiltin<"__builtin_altivec_vupkhpx">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_ppc_altivec_vupkhsb : GCCBuiltin<"__builtin_altivec_vupkhsb">, + Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_ppc_altivec_vupkhsh : GCCBuiltin<"__builtin_altivec_vupkhsh">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_ppc_altivec_vupklpx : GCCBuiltin<"__builtin_altivec_vupklpx">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_ppc_altivec_vupklsb : GCCBuiltin<"__builtin_altivec_vupklsb">, + Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_ppc_altivec_vupklsh : GCCBuiltin<"__builtin_altivec_vupklsh">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + + + // FP <-> integer conversion. + def int_ppc_altivec_vcfsx : GCCBuiltin<"__builtin_altivec_vcfsx">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcfux : GCCBuiltin<"__builtin_altivec_vcfux">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vctsxs : GCCBuiltin<"__builtin_altivec_vctsxs">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_ppc_altivec_vctuxs : GCCBuiltin<"__builtin_altivec_vctuxs">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem]>; + + def int_ppc_altivec_vrfim : GCCBuiltin<"__builtin_altivec_vrfim">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_ppc_altivec_vrfin : GCCBuiltin<"__builtin_altivec_vrfin">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_ppc_altivec_vrfip : GCCBuiltin<"__builtin_altivec_vrfip">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_ppc_altivec_vrfiz : GCCBuiltin<"__builtin_altivec_vrfiz">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; +} + +def int_ppc_altivec_vsl : PowerPC_Vec_WWW_Intrinsic<"vsl">; +def int_ppc_altivec_vslo : PowerPC_Vec_WWW_Intrinsic<"vslo">; + +def int_ppc_altivec_vslb : PowerPC_Vec_BBB_Intrinsic<"vslb">; +def int_ppc_altivec_vslh : PowerPC_Vec_HHH_Intrinsic<"vslh">; +def int_ppc_altivec_vslw : PowerPC_Vec_WWW_Intrinsic<"vslw">; + +// Right Shifts. +def int_ppc_altivec_vsr : PowerPC_Vec_WWW_Intrinsic<"vsr">; +def int_ppc_altivec_vsro : PowerPC_Vec_WWW_Intrinsic<"vsro">; + +def int_ppc_altivec_vsrb : PowerPC_Vec_BBB_Intrinsic<"vsrb">; +def int_ppc_altivec_vsrh : PowerPC_Vec_HHH_Intrinsic<"vsrh">; +def int_ppc_altivec_vsrw : PowerPC_Vec_WWW_Intrinsic<"vsrw">; +def int_ppc_altivec_vsrab : PowerPC_Vec_BBB_Intrinsic<"vsrab">; +def int_ppc_altivec_vsrah : PowerPC_Vec_HHH_Intrinsic<"vsrah">; +def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">; + +// Rotates. +def int_ppc_altivec_vrlb : PowerPC_Vec_BBB_Intrinsic<"vrlb">; +def int_ppc_altivec_vrlh : PowerPC_Vec_HHH_Intrinsic<"vrlh">; +def int_ppc_altivec_vrlw : PowerPC_Vec_WWW_Intrinsic<"vrlw">; + +let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.". + // Miscellaneous. 
+ def int_ppc_altivec_lvsl : + Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>; + def int_ppc_altivec_lvsr : + Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>; + + def int_ppc_altivec_vperm : GCCBuiltin<"__builtin_altivec_vperm_4si">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty, llvm_v16i8_ty], [IntrNoMem]>; + def int_ppc_altivec_vsel : GCCBuiltin<"__builtin_altivec_vsel_4si">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>; +} + +def int_ppc_altivec_vexptefp : PowerPC_Vec_FF_Intrinsic<"vexptefp">; +def int_ppc_altivec_vlogefp : PowerPC_Vec_FF_Intrinsic<"vlogefp">; +def int_ppc_altivec_vrefp : PowerPC_Vec_FF_Intrinsic<"vrefp">; +def int_ppc_altivec_vrsqrtefp : PowerPC_Vec_FF_Intrinsic<"vrsqrtefp">; diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td new file mode 100644 index 0000000..37ba59c --- /dev/null +++ b/include/llvm/IntrinsicsX86.td @@ -0,0 +1,1065 @@ +//===- IntrinsicsX86.td - Defines X86 intrinsics -----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the X86-specific intrinsics. +// +//===----------------------------------------------------------------------===// + + +//===----------------------------------------------------------------------===// +// SSE1 + +// Arithmetic ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_add_ss : GCCBuiltin<"__builtin_ia32_addss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_sub_ss : GCCBuiltin<"__builtin_ia32_subss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_mul_ss : GCCBuiltin<"__builtin_ia32_mulss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_div_ss : GCCBuiltin<"__builtin_ia32_divss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_sqrt_ss : GCCBuiltin<"__builtin_ia32_sqrtss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_sse_sqrt_ps : GCCBuiltin<"__builtin_ia32_sqrtps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_sse_rcp_ss : GCCBuiltin<"__builtin_ia32_rcpss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_sse_rcp_ps : GCCBuiltin<"__builtin_ia32_rcpps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_sse_rsqrt_ss : GCCBuiltin<"__builtin_ia32_rsqrtss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_sse_rsqrt_ps : GCCBuiltin<"__builtin_ia32_rsqrtps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_sse_min_ss : GCCBuiltin<"__builtin_ia32_minss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_min_ps : GCCBuiltin<"__builtin_ia32_minps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_max_ss : GCCBuiltin<"__builtin_ia32_maxss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_max_ps : GCCBuiltin<"__builtin_ia32_maxps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; +} + +// Comparison ops +let 
TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_cmp_ss : + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>; + def int_x86_sse_cmp_ps : + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>; + def int_x86_sse_comieq_ss : GCCBuiltin<"__builtin_ia32_comieq">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_comilt_ss : GCCBuiltin<"__builtin_ia32_comilt">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_comile_ss : GCCBuiltin<"__builtin_ia32_comile">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_comigt_ss : GCCBuiltin<"__builtin_ia32_comigt">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_comige_ss : GCCBuiltin<"__builtin_ia32_comige">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_comineq_ss : GCCBuiltin<"__builtin_ia32_comineq">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_ucomieq_ss : GCCBuiltin<"__builtin_ia32_ucomieq">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_ucomilt_ss : GCCBuiltin<"__builtin_ia32_ucomilt">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_ucomile_ss : GCCBuiltin<"__builtin_ia32_ucomile">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_ucomigt_ss : GCCBuiltin<"__builtin_ia32_ucomigt">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_ucomige_ss : GCCBuiltin<"__builtin_ia32_ucomige">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_ucomineq_ss : GCCBuiltin<"__builtin_ia32_ucomineq">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; +} + + +// Conversion ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_cvtss2si : GCCBuiltin<"__builtin_ia32_cvtss2si">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvtss2si64 : GCCBuiltin<"__builtin_ia32_cvtss2si64">, + Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvttss2si : GCCBuiltin<"__builtin_ia32_cvttss2si">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvttss2si64 : GCCBuiltin<"__builtin_ia32_cvttss2si64">, + Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvtsi2ss : GCCBuiltin<"__builtin_ia32_cvtsi2ss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse_cvtsi642ss : GCCBuiltin<"__builtin_ia32_cvtsi642ss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_i64_ty], [IntrNoMem]>; + def int_x86_sse_cvtps2pi : GCCBuiltin<"__builtin_ia32_cvtps2pi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvttps2pi: GCCBuiltin<"__builtin_ia32_cvttps2pi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvtpi2ps : GCCBuiltin<"__builtin_ia32_cvtpi2ps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v2i32_ty], [IntrNoMem]>; +} + +// SIMD load ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_sse_loadu_ps : GCCBuiltin<"__builtin_ia32_loadups">, + Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadMem]>; +} + +// SIMD store ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v4f32_ty], [IntrWriteMem]>; +} + +// Cacheability support ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_movnt_ps : GCCBuiltin<"__builtin_ia32_movntps">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v4f32_ty], [IntrWriteMem]>; + def int_x86_sse_sfence : GCCBuiltin<"__builtin_ia32_sfence">, + Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; +} + +// Control register. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_stmxcsr : + Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; + def int_x86_sse_ldmxcsr : + Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; +} + +// Misc. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse_movmsk_ps : GCCBuiltin<"__builtin_ia32_movmskps">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; +} + +//===----------------------------------------------------------------------===// +// SSE2 + +// FP arithmetic ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse2_add_sd : GCCBuiltin<"__builtin_ia32_addsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_sub_sd : GCCBuiltin<"__builtin_ia32_subsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_mul_sd : GCCBuiltin<"__builtin_ia32_mulsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_div_sd : GCCBuiltin<"__builtin_ia32_divsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_sqrt_sd : GCCBuiltin<"__builtin_ia32_sqrtsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], + [IntrNoMem]>; + def int_x86_sse2_sqrt_pd : GCCBuiltin<"__builtin_ia32_sqrtpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], + [IntrNoMem]>; + def int_x86_sse2_min_sd : GCCBuiltin<"__builtin_ia32_minsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_min_pd : GCCBuiltin<"__builtin_ia32_minpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_max_sd : GCCBuiltin<"__builtin_ia32_maxsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_max_pd : GCCBuiltin<"__builtin_ia32_maxpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; +} + +// FP comparison ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_sse2_cmp_sd : + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>; + def int_x86_sse2_cmp_pd : + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>; + def int_x86_sse2_comieq_sd : GCCBuiltin<"__builtin_ia32_comisdeq">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_comilt_sd : GCCBuiltin<"__builtin_ia32_comisdlt">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_comile_sd : GCCBuiltin<"__builtin_ia32_comisdle">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_comigt_sd : GCCBuiltin<"__builtin_ia32_comisdgt">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_comige_sd : GCCBuiltin<"__builtin_ia32_comisdge">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_comineq_sd : GCCBuiltin<"__builtin_ia32_comisdneq">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_ucomieq_sd : GCCBuiltin<"__builtin_ia32_ucomisdeq">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_ucomilt_sd : GCCBuiltin<"__builtin_ia32_ucomisdlt">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_ucomile_sd : GCCBuiltin<"__builtin_ia32_ucomisdle">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_ucomigt_sd : GCCBuiltin<"__builtin_ia32_ucomisdgt">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_ucomige_sd : GCCBuiltin<"__builtin_ia32_ucomisdge">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_ucomineq_sd : GCCBuiltin<"__builtin_ia32_ucomisdneq">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; +} + +// Integer arithmetic ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
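A hedged C sketch of a few of the integer operations declared below, using the conventional <emmintrin.h> wrappers (assumed, not part of this patch) on an SSE2-capable x86 target:

    #include <emmintrin.h>

    /* Per-byte unsigned saturating add (paddusb). */
    __m128i brighten_clamped(__m128i pixels, __m128i delta) {
        return _mm_adds_epu8(pixels, delta);
    }

    /* Rounded per-byte unsigned average (pavgb). */
    __m128i blend_average(__m128i a, __m128i b) {
        return _mm_avg_epu8(a, b);
    }

    /* Sums of absolute byte differences, one per 64-bit half (psadbw). */
    __m128i abs_difference_sums(__m128i a, __m128i b) {
        return _mm_sad_epu8(a, b);
    }
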
+ def int_x86_sse2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_sse2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_sse2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pminu_b : GCCBuiltin<"__builtin_ia32_pminub128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; + def int_x86_sse2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem, Commutative]>; +} + +// Integer shift ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
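The shift declarations below come in two flavours: the psll, psrl and psra forms take the count in the low 64 bits of an XMM operand, while the pslli, psrli and psrai forms take an immediate. A short C sketch of both, via the usual <emmintrin.h> wrappers (assumed, not part of this patch):

    #include <emmintrin.h>

    /* Immediate-count form: every 16-bit lane shifted left by 3. */
    __m128i shift_by_immediate(__m128i v) {
        return _mm_slli_epi16(v, 3);
    }

    /* Register-count form: only the low 64 bits of 'count' are used. */
    __m128i shift_by_register(__m128i v, __m128i count) {
        return _mm_sll_epi16(v, count);
    }
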
+ def int_x86_sse2_psll_w : GCCBuiltin<"__builtin_ia32_psllw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_psll_d : GCCBuiltin<"__builtin_ia32_pslld128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_sse2_psll_q : GCCBuiltin<"__builtin_ia32_psllq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_v2i64_ty], [IntrNoMem]>; + def int_x86_sse2_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_psrl_d : GCCBuiltin<"__builtin_ia32_psrld128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_v2i64_ty], [IntrNoMem]>; + def int_x86_sse2_psra_w : GCCBuiltin<"__builtin_ia32_psraw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_psra_d : GCCBuiltin<"__builtin_ia32_psrad128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + + def int_x86_sse2_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrai_d : GCCBuiltin<"__builtin_ia32_psradi128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_i32_ty], [IntrNoMem]>; + + def int_x86_sse2_psll_dq : GCCBuiltin<"__builtin_ia32_pslldqi128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrl_dq : GCCBuiltin<"__builtin_ia32_psrldqi128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psll_dq_bs : GCCBuiltin<"__builtin_ia32_pslldqi128_byteshift">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_psrl_dq_bs : GCCBuiltin<"__builtin_ia32_psrldqi128_byteshift">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_i32_ty], [IntrNoMem]>; +} + +// Integer comparison ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_sse2_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_sse2_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_sse2_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_sse2_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; +} + +// Conversion ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse2_cvtdq2pd : GCCBuiltin<"__builtin_ia32_cvtdq2pd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_sse2_cvtdq2ps : GCCBuiltin<"__builtin_ia32_cvtdq2ps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_sse2_cvtpd2dq : GCCBuiltin<"__builtin_ia32_cvtpd2dq">, + Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvttpd2dq : GCCBuiltin<"__builtin_ia32_cvttpd2dq">, + Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvtpd2ps : GCCBuiltin<"__builtin_ia32_cvtpd2ps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvtps2dq : GCCBuiltin<"__builtin_ia32_cvtps2dq">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse2_cvttps2dq : GCCBuiltin<"__builtin_ia32_cvttps2dq">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse2_cvtps2pd : GCCBuiltin<"__builtin_ia32_cvtps2pd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse2_cvtsd2si : GCCBuiltin<"__builtin_ia32_cvtsd2si">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvtsd2si64 : GCCBuiltin<"__builtin_ia32_cvtsd2si64">, + Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvttsd2si : GCCBuiltin<"__builtin_ia32_cvttsd2si">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_cvttsd2si64">, + Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvtsi2sd : GCCBuiltin<"__builtin_ia32_cvtsi2sd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse2_cvtsi642sd : GCCBuiltin<"__builtin_ia32_cvtsi642sd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_i64_ty], [IntrNoMem]>; + def int_x86_sse2_cvtsd2ss : GCCBuiltin<"__builtin_ia32_cvtsd2ss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_cvtss2sd : GCCBuiltin<"__builtin_ia32_cvtss2sd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse_cvttpd2pi: GCCBuiltin<"__builtin_ia32_cvttpd2pi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse_cvtpi2pd : GCCBuiltin<"__builtin_ia32_cvtpi2pd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2i32_ty], [IntrNoMem]>; +} + +// SIMD load ops +let TargetPrefix = 
"x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse2_loadu_pd : GCCBuiltin<"__builtin_ia32_loadupd">, + Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty], [IntrReadMem]>; + def int_x86_sse2_loadu_dq : GCCBuiltin<"__builtin_ia32_loaddqu">, + Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem]>; +} + +// SIMD store ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v2f64_ty], [IntrWriteMem]>; + def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v16i8_ty], [IntrWriteMem]>; + def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v4i32_ty], [IntrWriteMem]>; +} + +// Cacheability support ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse2_movnt_dq : GCCBuiltin<"__builtin_ia32_movntdq">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v2i64_ty], [IntrWriteMem]>; + def int_x86_sse2_movnt_pd : GCCBuiltin<"__builtin_ia32_movntpd">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v2f64_ty], [IntrWriteMem]>; + def int_x86_sse2_movnt_i : GCCBuiltin<"__builtin_ia32_movnti">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_i32_ty], [IntrWriteMem]>; +} + +// Misc. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse2_packsswb_128 : GCCBuiltin<"__builtin_ia32_packsswb128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_packssdw_128 : GCCBuiltin<"__builtin_ia32_packssdw128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_sse2_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_sse2_movmsk_pd : GCCBuiltin<"__builtin_ia32_movmskpd">, + Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">, + Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">, + Intrinsic<[llvm_void_ty], [llvm_v16i8_ty, + llvm_v16i8_ty, llvm_ptr_ty], [IntrWriteMem]>; + def int_x86_sse2_clflush : GCCBuiltin<"__builtin_ia32_clflush">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>; + def int_x86_sse2_lfence : GCCBuiltin<"__builtin_ia32_lfence">, + Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + def int_x86_sse2_mfence : GCCBuiltin<"__builtin_ia32_mfence">, + Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; +} + +//===----------------------------------------------------------------------===// +// SSE3 + +// Addition / subtraction ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse3_addsub_ps : GCCBuiltin<"__builtin_ia32_addsubps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse3_addsub_pd : GCCBuiltin<"__builtin_ia32_addsubpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; +} + +// Horizontal ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_sse3_hadd_ps : GCCBuiltin<"__builtin_ia32_haddps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse3_hadd_pd : GCCBuiltin<"__builtin_ia32_haddpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_sse3_hsub_ps : GCCBuiltin<"__builtin_ia32_hsubps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_sse3_hsub_pd : GCCBuiltin<"__builtin_ia32_hsubpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_v2f64_ty], [IntrNoMem]>; +} + +// Specialized unaligned load. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse3_ldu_dq : GCCBuiltin<"__builtin_ia32_lddqu">, + Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem]>; +} + +// Thread synchronization ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse3_monitor : GCCBuiltin<"__builtin_ia32_monitor">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>; + def int_x86_sse3_mwait : GCCBuiltin<"__builtin_ia32_mwait">, + Intrinsic<[llvm_void_ty], [llvm_i32_ty, + llvm_i32_ty], [IntrWriteMem]>; +} + +//===----------------------------------------------------------------------===// +// SSSE3 + +// Horizontal arithmetic ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_ssse3_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_phadd_w_128 : GCCBuiltin<"__builtin_ia32_phaddw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + + def int_x86_ssse3_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem]>; + def int_x86_ssse3_phadd_d_128 : GCCBuiltin<"__builtin_ia32_phaddd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + + def int_x86_ssse3_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_phadd_sw_128 : GCCBuiltin<"__builtin_ia32_phaddsw128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + + def int_x86_ssse3_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_phsub_w_128 : GCCBuiltin<"__builtin_ia32_phsubw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + + def int_x86_ssse3_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem]>; + def int_x86_ssse3_phsub_d_128 : GCCBuiltin<"__builtin_ia32_phsubd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; + + def int_x86_ssse3_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_phsub_sw_128 : GCCBuiltin<"__builtin_ia32_phsubsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + + def int_x86_ssse3_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; +} + +// Packed multiply high with round and 
scale +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_ssse3_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + def int_x86_ssse3_pmul_hr_sw_128 : GCCBuiltin<"__builtin_ia32_pmulhrsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem, Commutative]>; +} + +// Shuffle ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_ssse3_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_ssse3_pshuf_b_128 : GCCBuiltin<"__builtin_ia32_pshufb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem]>; +} + +// Sign ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_ssse3_psign_b : GCCBuiltin<"__builtin_ia32_psignb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_ssse3_psign_b_128 : GCCBuiltin<"__builtin_ia32_psignb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, + llvm_v16i8_ty], [IntrNoMem]>; + + def int_x86_ssse3_psign_w : GCCBuiltin<"__builtin_ia32_psignw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_psign_w_128 : GCCBuiltin<"__builtin_ia32_psignw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, + llvm_v8i16_ty], [IntrNoMem]>; + + def int_x86_ssse3_psign_d : GCCBuiltin<"__builtin_ia32_psignd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem]>; + def int_x86_ssse3_psign_d_128 : GCCBuiltin<"__builtin_ia32_psignd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, + llvm_v4i32_ty], [IntrNoMem]>; +} + +// Absolute value ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_ssse3_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_ssse3_pabs_b_128 : GCCBuiltin<"__builtin_ia32_pabsb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>; + + def int_x86_ssse3_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_ssse3_pabs_w_128 : GCCBuiltin<"__builtin_ia32_pabsw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>; + + def int_x86_ssse3_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + def int_x86_ssse3_pabs_d_128 : GCCBuiltin<"__builtin_ia32_pabsd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>; +} + +// Align ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_ssse3_palign_r : GCCBuiltin<"__builtin_ia32_palignr">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, + llvm_v1i64_ty, llvm_i16_ty], [IntrNoMem]>; + def int_x86_ssse3_palign_r_128 : GCCBuiltin<"__builtin_ia32_palignr128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, + llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>; +} + +//===----------------------------------------------------------------------===// +// SSE4.1 + +// FP rounding ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
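+  // Each def in this file becomes an LLVM IR intrinsic whose name is the def
+  // name with the leading "int_" replaced by "llvm." and '_' replaced by '.'.
+  // For example, int_x86_sse41_round_ps below is reached from IR roughly as
+  // (sketch only; the i32 rounding-mode immediate shown is arbitrary):
+  //   %r = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a, i32 8)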
+ def int_x86_sse41_round_ss : GCCBuiltin<"__builtin_ia32_roundss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse41_round_ps : GCCBuiltin<"__builtin_ia32_roundps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse41_round_sd : GCCBuiltin<"__builtin_ia32_roundsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_sse41_round_pd : GCCBuiltin<"__builtin_ia32_roundpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, + llvm_i32_ty], [IntrNoMem]>; +} + +// Vector sign and zero extend +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_pmovsxbd : GCCBuiltin<"__builtin_ia32_pmovsxbd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovsxbq : GCCBuiltin<"__builtin_ia32_pmovsxbq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovsxbw : GCCBuiltin<"__builtin_ia32_pmovsxbw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovsxdq : GCCBuiltin<"__builtin_ia32_pmovsxdq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovsxwd : GCCBuiltin<"__builtin_ia32_pmovsxwd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovsxwq : GCCBuiltin<"__builtin_ia32_pmovsxwq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovzxbd : GCCBuiltin<"__builtin_ia32_pmovzxbd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovzxbq : GCCBuiltin<"__builtin_ia32_pmovzxbq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovzxbw : GCCBuiltin<"__builtin_ia32_pmovzxbw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovzxdq : GCCBuiltin<"__builtin_ia32_pmovzxdq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovzxwd : GCCBuiltin<"__builtin_ia32_pmovzxwd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_sse41_pmovzxwq : GCCBuiltin<"__builtin_ia32_pmovzxwq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], + [IntrNoMem]>; +} + +// Vector min element +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_phminposuw : GCCBuiltin<"__builtin_ia32_phminposuw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], + [IntrNoMem]>; +} + +// Vector compare, min, max +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_sse41_pcmpeqq : GCCBuiltin<"__builtin_ia32_pcmpeqq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse42_pcmpgtq : GCCBuiltin<"__builtin_ia32_pcmpgtq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_sse41_pmaxsb : GCCBuiltin<"__builtin_ia32_pmaxsb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pmaxsd : GCCBuiltin<"__builtin_ia32_pmaxsd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pmaxud : GCCBuiltin<"__builtin_ia32_pmaxud128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pmaxuw : GCCBuiltin<"__builtin_ia32_pmaxuw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pminsb : GCCBuiltin<"__builtin_ia32_pminsb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pminsd : GCCBuiltin<"__builtin_ia32_pminsd128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pminud : GCCBuiltin<"__builtin_ia32_pminud128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pminuw : GCCBuiltin<"__builtin_ia32_pminuw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem, Commutative]>; +} + +// Vector pack +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_packusdw : GCCBuiltin<"__builtin_ia32_packusdw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; +} + +// Vector multiply +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_pmuldq : GCCBuiltin<"__builtin_ia32_pmuldq128">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_pmulld : GCCBuiltin<"__builtin_ia32_pmulld128">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem, Commutative]>; +} + +// Vector extract +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_pextrb : + Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_pextrd : + Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_pextrq : + Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_extractps : GCCBuiltin<"__builtin_ia32_extractps128">, + Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem]>; +} + +// Vector insert +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_pinsrb : GCCBuiltin<"__builtin_ia32_vec_set_v16qi">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_insertps : GCCBuiltin<"__builtin_ia32_insertps128">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,llvm_i32_ty], + [IntrNoMem]>; +} + +// Vector blend +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_sse41_pblendvb : GCCBuiltin<"__builtin_ia32_pblendvb128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_sse41_pblendw : GCCBuiltin<"__builtin_ia32_pblendw128">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_blendpd : GCCBuiltin<"__builtin_ia32_blendpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_blendps : GCCBuiltin<"__builtin_ia32_blendps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_x86_sse41_blendvpd : GCCBuiltin<"__builtin_ia32_blendvpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,llvm_v2f64_ty], + [IntrNoMem]>; + def int_x86_sse41_blendvps : GCCBuiltin<"__builtin_ia32_blendvps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,llvm_v4f32_ty], + [IntrNoMem]>; +} + +// Vector dot product +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_dppd : GCCBuiltin<"__builtin_ia32_dppd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,llvm_i32_ty], + [IntrNoMem, Commutative]>; + def int_x86_sse41_dpps : GCCBuiltin<"__builtin_ia32_dpps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,llvm_i32_ty], + [IntrNoMem, Commutative]>; +} + +// Vector sum of absolute differences +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw128">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_i32_ty], + [IntrNoMem, Commutative]>; +} + +// Cacheability support ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_sse41_movntdqa : GCCBuiltin<"__builtin_ia32_movntdqa">, + Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty], [IntrReadMem]>; +} + + +//===----------------------------------------------------------------------===// +// MMX + +// Empty MMX state op. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_mmx_emms : GCCBuiltin<"__builtin_ia32_emms">, + Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; + def int_x86_mmx_femms : GCCBuiltin<"__builtin_ia32_femms">, + Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>; +} + +// Integer arithmetic ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
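+  // Note that these MMX intrinsics operate on plain vector types (<8 x i8>,
+  // <4 x i16>, <2 x i32>, <1 x i64>). As a sketch, the saturating add defined
+  // below is used from IR as:
+  //   %s = call <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8> %a, <8 x i8> %b)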
+ // Addition + def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_padds_w : GCCBuiltin<"__builtin_ia32_paddsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + + def int_x86_mmx_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + + // Subtraction + def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_mmx_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + + def int_x86_mmx_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_mmx_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + + // Multiplication + def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + + // Averages + def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + + // Maximum + def int_x86_mmx_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + + // Minimum + def int_x86_mmx_pminu_b : GCCBuiltin<"__builtin_ia32_pminub">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem, Commutative]>; + def int_x86_mmx_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem, Commutative]>; + + // Packed sum of absolute differences + def int_x86_mmx_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem, Commutative]>; +} + +// Integer shift ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ // Shift left logical + def int_x86_mmx_psll_w : GCCBuiltin<"__builtin_ia32_psllw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v1i64_ty], [IntrNoMem]>; + def int_x86_mmx_psll_d : GCCBuiltin<"__builtin_ia32_pslld">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v1i64_ty], [IntrNoMem]>; + def int_x86_mmx_psll_q : GCCBuiltin<"__builtin_ia32_psllq">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, + llvm_v1i64_ty], [IntrNoMem]>; + + def int_x86_mmx_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v1i64_ty], [IntrNoMem]>; + def int_x86_mmx_psrl_d : GCCBuiltin<"__builtin_ia32_psrld">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v1i64_ty], [IntrNoMem]>; + def int_x86_mmx_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, + llvm_v1i64_ty], [IntrNoMem]>; + + def int_x86_mmx_psra_w : GCCBuiltin<"__builtin_ia32_psraw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v1i64_ty], [IntrNoMem]>; + def int_x86_mmx_psra_d : GCCBuiltin<"__builtin_ia32_psrad">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v1i64_ty], [IntrNoMem]>; + + def int_x86_mmx_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_mmx_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_mmx_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, + llvm_i32_ty], [IntrNoMem]>; + + def int_x86_mmx_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_mmx_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_mmx_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, + llvm_i32_ty], [IntrNoMem]>; + + def int_x86_mmx_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_i32_ty], [IntrNoMem]>; + def int_x86_mmx_psrai_d : GCCBuiltin<"__builtin_ia32_psradi">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_i32_ty], [IntrNoMem]>; +} + +// Pack ops. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_mmx_packsswb : GCCBuiltin<"__builtin_ia32_packsswb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_mmx_packssdw : GCCBuiltin<"__builtin_ia32_packssdw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem]>; + def int_x86_mmx_packuswb : GCCBuiltin<"__builtin_ia32_packuswb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; +} + +// Integer comparison ops +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
+ def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem]>; + + def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; + def int_x86_mmx_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, + llvm_v4i16_ty], [IntrNoMem]>; + def int_x86_mmx_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, + llvm_v2i32_ty], [IntrNoMem]>; +} + +// Misc. +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">, + Intrinsic<[llvm_void_ty], + [llvm_v8i8_ty, llvm_v8i8_ty, llvm_ptr_ty], + [IntrWriteMem]>; + + def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">, + Intrinsic<[llvm_i32_ty], [llvm_v8i8_ty], [IntrNoMem]>; + + def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">, + Intrinsic<[llvm_void_ty], [llvm_ptr_ty, + llvm_v1i64_ty], [IntrWriteMem]>; +} diff --git a/include/llvm/IntrinsicsXCore.td b/include/llvm/IntrinsicsXCore.td new file mode 100644 index 0000000..a86cda2 --- /dev/null +++ b/include/llvm/IntrinsicsXCore.td @@ -0,0 +1,14 @@ +//==- IntrinsicsXCore.td - XCore intrinsics -*- tablegen -*-==// +// +// Copyright (C) 2008 XMOS +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the XCore-specific intrinsics. +// +//===----------------------------------------------------------------------===// + +let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.". + def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>; + def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>; +} diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h new file mode 100644 index 0000000..edd27223 --- /dev/null +++ b/include/llvm/LinkAllPasses.h @@ -0,0 +1,142 @@ +//===- llvm/LinkAllPasses.h ------------ Reference All Passes ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file pulls in all transformation and analysis passes for tools +// like opt and bugpoint that need this functionality. 
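+// A tool normally just includes this header from its driver translation unit;
+// the ForcePassLinking object below then references every pass-creation
+// function so the static linker cannot drop the pass libraries. A minimal
+// sketch (argument parsing and PassManager setup are elided):
+//
+//   #include "llvm/LinkAllPasses.h"
+//   int main(int argc, char **argv) { /* build a PassManager and run it */ }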
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LINKALLPASSES_H +#define LLVM_LINKALLPASSES_H + +#include "llvm/Analysis/AliasSetTracker.h" +#include "llvm/Analysis/FindUsedTypes.h" +#include "llvm/Analysis/IntervalPartition.h" +#include "llvm/Analysis/LoopVR.h" +#include "llvm/Analysis/Passes.h" +#include "llvm/Analysis/PostDominators.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Assembly/PrintModulePass.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/Function.h" +#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/IPO.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h" +#include <cstdlib> + +namespace { + struct ForcePassLinking { + ForcePassLinking() { + // We must reference the passes in such a way that compilers will not + // delete it all as dead code, even with whole program optimization, + // yet is effectively a NO-OP. As the compiler isn't smart enough + // to know that getenv() never returns -1, this will do the job. + if (std::getenv("bar") != (char*) -1) + return; + + (void) llvm::createAAEvalPass(); + (void) llvm::createAggressiveDCEPass(); + (void) llvm::createAliasAnalysisCounterPass(); + (void) llvm::createAliasDebugger(); + (void) llvm::createAndersensPass(); + (void) llvm::createArgumentPromotionPass(); + (void) llvm::createStructRetPromotionPass(); + (void) llvm::createBasicAliasAnalysisPass(); + (void) llvm::createLibCallAliasAnalysisPass(0); + (void) llvm::createBlockPlacementPass(); + (void) llvm::createBlockProfilerPass(); + (void) llvm::createBreakCriticalEdgesPass(); + (void) llvm::createCFGSimplificationPass(); + (void) llvm::createConstantMergePass(); + (void) llvm::createConstantPropagationPass(); + (void) llvm::createDeadArgEliminationPass(); + (void) llvm::createDeadCodeEliminationPass(); + (void) llvm::createDeadInstEliminationPass(); + (void) llvm::createDeadStoreEliminationPass(); + (void) llvm::createDeadTypeEliminationPass(); + (void) llvm::createEdgeProfilerPass(); + (void) llvm::createFunctionInliningPass(); + (void) llvm::createAlwaysInlinerPass(); + (void) llvm::createFunctionProfilerPass(); + (void) llvm::createGlobalDCEPass(); + (void) llvm::createGlobalOptimizerPass(); + (void) llvm::createGlobalsModRefPass(); + (void) llvm::createGVNPREPass(); + (void) llvm::createIPConstantPropagationPass(); + (void) llvm::createIPSCCPPass(); + (void) llvm::createIndVarSimplifyPass(); + (void) llvm::createInstructionCombiningPass(); + (void) llvm::createInternalizePass(false); + (void) llvm::createLCSSAPass(); + (void) llvm::createLICMPass(); + (void) llvm::createLiveValuesPass(); + (void) llvm::createLoopExtractorPass(); + (void) llvm::createLoopSimplifyPass(); + (void) llvm::createLoopStrengthReducePass(); + (void) llvm::createLoopUnrollPass(); + (void) llvm::createLoopUnswitchPass(); + (void) llvm::createLoopRotatePass(); + (void) llvm::createLoopIndexSplitPass(); + (void) llvm::createLowerAllocationsPass(); + (void) llvm::createLowerInvokePass(); + (void) llvm::createLowerSetJmpPass(); + (void) llvm::createLowerSwitchPass(); + (void) llvm::createNoAAPass(); + (void) llvm::createNoProfileInfoPass(); + (void) llvm::createProfileLoaderPass(); + (void) llvm::createPromoteMemoryToRegisterPass(); + (void) llvm::createDemoteRegisterToMemoryPass(); + (void) llvm::createPruneEHPass(); + (void) llvm::createRaiseAllocationsPass(); + (void) llvm::createReassociatePass(); + (void) llvm::createSCCPPass(); + (void) 
llvm::createScalarReplAggregatesPass(); + (void) llvm::createSimplifyLibCallsPass(); + (void) llvm::createSimplifyHalfPowrLibCallsPass(); + (void) llvm::createSingleLoopExtractorPass(); + (void) llvm::createStripSymbolsPass(); + (void) llvm::createStripNonDebugSymbolsPass(); + (void) llvm::createStripDeadPrototypesPass(); + (void) llvm::createTailCallEliminationPass(); + (void) llvm::createTailDuplicationPass(); + (void) llvm::createJumpThreadingPass(); + (void) llvm::createUnifyFunctionExitNodesPass(); + (void) llvm::createCondPropagationPass(); + (void) llvm::createNullProfilerRSPass(); + (void) llvm::createRSProfilingPass(); + (void) llvm::createIndMemRemPass(); + (void) llvm::createInstCountPass(); + (void) llvm::createPredicateSimplifierPass(); + (void) llvm::createCodeGenPreparePass(); + (void) llvm::createGVNPass(); + (void) llvm::createMemCpyOptPass(); + (void) llvm::createLoopDeletionPass(); + (void) llvm::createPostDomTree(); + (void) llvm::createPostDomFrontier(); + (void) llvm::createInstructionNamerPass(); + (void) llvm::createPartialSpecializationPass(); + (void) llvm::createFunctionAttrsPass(); + (void) llvm::createMergeFunctionsPass(); + (void) llvm::createPrintModulePass(0); + (void) llvm::createPrintFunctionPass("", 0); + (void) llvm::createDbgInfoPrinterPass(); + + (void)new llvm::IntervalPartition(); + (void)new llvm::FindUsedTypes(); + (void)new llvm::ScalarEvolution(); + (void)new llvm::LoopVR(); + ((llvm::Function*)0)->viewCFGOnly(); + llvm::AliasSetTracker X(*(llvm::AliasAnalysis*)0); + X.add((llvm::Value*)0, 0); // for -print-alias-sets + } + } ForcePassLinking; // Force link by creating a global definition. +} + +#endif diff --git a/include/llvm/LinkAllVMCore.h b/include/llvm/LinkAllVMCore.h new file mode 100644 index 0000000..4c428a0 --- /dev/null +++ b/include/llvm/LinkAllVMCore.h @@ -0,0 +1,55 @@ +//===- LinkAllVMCore.h - Reference All VMCore Code --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file pulls in all the object modules of the VMCore library so +// that tools like llc, opt, and lli can ensure they are linked with all symbols +// from libVMCore.a It should only be used from a tool's main program. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LINKALLVMCORE_H +#define LLVM_LINKALLVMCORE_H + +#include "llvm/Module.h" +#include "llvm/Instructions.h" +#include "llvm/IntrinsicInst.h" +#include "llvm/InlineAsm.h" +#include "llvm/Analysis/Verifier.h" +#include "llvm/System/Alarm.h" +#include "llvm/System/DynamicLibrary.h" +#include "llvm/System/Memory.h" +#include "llvm/System/Mutex.h" +#include "llvm/System/Path.h" +#include "llvm/System/Process.h" +#include "llvm/System/Program.h" +#include "llvm/System/Signals.h" +#include "llvm/System/TimeValue.h" +#include "llvm/Support/Dwarf.h" +#include "llvm/Support/Mangler.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/SlowOperationInformer.h" + +namespace { + struct ForceVMCoreLinking { + ForceVMCoreLinking() { + // We must reference VMCore in such a way that compilers will not + // delete it all as dead code, even with whole program optimization, + // yet is effectively a NO-OP. As the compiler isn't smart enough + // to know that getenv() never returns -1, this will do the job. 
+ if (std::getenv("bar") != (char*) -1) + return; + llvm::Module* M = new llvm::Module(""); + (void)new llvm::UnreachableInst(); + (void) llvm::createVerifierPass(); + (void) new llvm::Mangler(*M,""); + } + } ForceVMCoreLinking; +} + +#endif diff --git a/include/llvm/Linker.h b/include/llvm/Linker.h new file mode 100644 index 0000000..884e872 --- /dev/null +++ b/include/llvm/Linker.h @@ -0,0 +1,297 @@ +//===- llvm/Linker.h - Module Linker Interface ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface to the module/file/archive linker. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LINKER_H +#define LLVM_LINKER_H + +#include "llvm/System/Path.h" +#include <memory> +#include <vector> + +namespace llvm { + +class Module; + +/// This class provides the core functionality of linking in LLVM. It retains a +/// Module object which is the composite of the modules and libraries linked +/// into it. The composite Module can be retrieved via the getModule() method. +/// In this case the Linker still retains ownership of the Module. If the +/// releaseModule() method is used, the ownership of the Module is transferred +/// to the caller and the Linker object is only suitable for destruction. +/// The Linker can link Modules from memory, bitcode files, or bitcode +/// archives. It retains a set of search paths in which to find any libraries +/// presented to it. By default, the linker will generate error and warning +/// messages to std::cerr but this capability can be turned off with the +/// QuietWarnings and QuietErrors flags. It can also be instructed to verbosely +/// print out the linking actions it is taking with the Verbose flag. +/// @brief The LLVM Linker. +class Linker { + + /// @name Types + /// @{ + public: + /// This type is used to pass the linkage items (libraries and files) to + /// the LinkItems function. It is composed of string/bool pairs. The string + /// provides the name of the file or library (as with the -l option). The + /// bool should be true for libraries and false for files, signifying + /// "isLibrary". + /// @brief A list of linkage items + typedef std::vector<std::pair<std::string,bool> > ItemList; + + /// This enumeration is used to control various optional features of the + /// linker. + enum ControlFlags { + Verbose = 1, ///< Print to std::cerr what steps the linker is taking + QuietWarnings = 2, ///< Don't print warnings to std::cerr. + QuietErrors = 4 ///< Don't print errors to std::cerr. + }; + + /// @} + /// @name Constructors + /// @{ + public: + /// Construct the Linker with an empty module which will be given the + /// name \p progname. \p progname will also be used for error messages. + /// @brief Construct with empty module + Linker( + const std::string& progname, ///< name of tool running linker + const std::string& modulename, ///< name of linker's end-result module + unsigned Flags = 0 ///< ControlFlags (one or more |'d together) + ); + + /// Construct the Linker with a previously defined module, \p aModule. Use + /// \p progname for the name of the program in error messages. + /// @brief Construct with existing module + Linker(const std::string& progname, Module* aModule, unsigned Flags = 0); + + /// Destruct the Linker. 
+ /// @brief Destructor + ~Linker(); + + /// @} + /// @name Accessors + /// @{ + public: + /// This method gets the composite module into which linking is being + /// done. The Composite module starts out empty and accumulates modules + /// linked into it via the various LinkIn* methods. This method does not + /// release the Module to the caller. The Linker retains ownership and will + /// destruct the Module when the Linker is destructed. + /// @see releaseModule + /// @brief Get the linked/composite module. + Module* getModule() const { return Composite; } + + /// This method releases the composite Module into which linking is being + /// done. Ownership of the composite Module is transferred to the caller who + /// must arrange for its destruct. After this method is called, the Linker + /// terminates the linking session for the returned Module. It will no + /// longer utilize the returned Module but instead resets itself for + /// subsequent linking as if the constructor had been called. The Linker's + /// LibPaths and flags to be reset, and memory will be released. + /// @brief Release the linked/composite module. + Module* releaseModule(); + + /// This method gets the list of libraries that form the path that the + /// Linker will search when it is presented with a library name. + /// @brief Get the Linkers library path + const std::vector<sys::Path>& getLibPaths() const { return LibPaths; } + + /// This method returns an error string suitable for printing to the user. + /// The return value will be empty unless an error occurred in one of the + /// LinkIn* methods. In those cases, the LinkIn* methods will have returned + /// true, indicating an error occurred. At most one error is retained so + /// this function always returns the last error that occurred. Note that if + /// the Quiet control flag is not set, the error string will have already + /// been printed to std::cerr. + /// @brief Get the text of the last error that occurred. + const std::string& getLastError() const { return Error; } + + /// @} + /// @name Mutators + /// @{ + public: + /// Add a path to the list of paths that the Linker will search. The Linker + /// accumulates the set of libraries added + /// library paths for the target platform. The standard libraries will + /// always be searched last. The added libraries will be searched in the + /// order added. + /// @brief Add a path. + void addPath(const sys::Path& path); + + /// Add a set of paths to the list of paths that the linker will search. The + /// Linker accumulates the set of libraries added. The \p paths will be + /// added to the end of the Linker's list. Order will be retained. + /// @brief Add a set of paths. + void addPaths(const std::vector<std::string>& paths); + + /// This method augments the Linker's list of library paths with the system + /// paths of the host operating system, include LLVM_LIB_SEARCH_PATH. + /// @brief Add the system paths. + void addSystemPaths(); + + /// Control optional linker behavior by setting a group of flags. The flags + /// are defined in the ControlFlags enumeration. + /// @see ControlFlags + /// @brief Set control flags. + void setFlags(unsigned flags) { Flags = flags; } + + /// This method is the main interface to the linker. It can be used to + /// link a set of linkage items into a module. A linkage item is either a + /// file name with fully qualified path, or a library for which the Linker's + /// LibraryPath will be utilized to locate the library. 
The bool value in + /// the LinkItemKind should be set to true for libraries. This function + /// allows linking to preserve the order of specification associated with + /// the command line, or for other purposes. Each item will be linked in + /// turn as it occurs in \p Items. + /// @returns true if an error occurred, false otherwise + /// @see LinkItemKind + /// @see getLastError + /// @throws nothing + bool LinkInItems ( + const ItemList& Items, ///< Set of libraries/files to link in + ItemList& NativeItems ///< Output list of native files/libs + ); + + /// This function links the bitcode \p Files into the composite module. + /// Note that this does not do any linking of unresolved symbols. The \p + /// Files are all completely linked into \p HeadModule regardless of + /// unresolved symbols. This function just loads each bitcode file and + /// calls LinkInModule on them. + /// @returns true if an error occurs, false otherwise + /// @see getLastError + /// @brief Link in multiple files. + bool LinkInFiles ( + const std::vector<sys::Path> & Files ///< Files to link in + ); + + /// This function links a single bitcode file, \p File, into the composite + /// module. Note that this does not attempt to resolve symbols. This method + /// just loads the bitcode file and calls LinkInModule on it. If an error + /// occurs, the Linker's error string is set. + /// @returns true if an error occurs, false otherwise + /// @see getLastError + /// @brief Link in a single file. + bool LinkInFile( + const sys::Path& File, ///< File to link in. + bool &is_native ///< Indicates if the file is native object file + ); + + /// This function provides a way to selectively link in a set of modules, + /// found in libraries, based on the unresolved symbols in the composite + /// module. Each item in \p Libraries should be the base name of a library, + /// as if given with the -l option of a linker tool. The Linker's LibPaths + /// are searched for the \p Libraries and any found will be linked in with + /// LinkInArchive. If an error occurs, the Linker's error string is set. + /// @see LinkInArchive + /// @see getLastError + /// @returns true if an error occurs, false otherwise + /// @brief Link libraries into the module + bool LinkInLibraries ( + const std::vector<std::string> & Libraries ///< Libraries to link in + ); + + /// This function provides a way to selectively link in a set of modules, + /// found in one library, based on the unresolved symbols in the composite + /// module.The \p Library should be the base name of a library, as if given + /// with the -l option of a linker tool. The Linker's LibPaths are searched + /// for the \p Library and if found, it will be linked in with via the + /// LinkInArchive method. If an error occurs, the Linker's error string is + /// set. + /// @see LinkInArchive + /// @see getLastError + /// @returns true if an error occurs, false otherwise + /// @brief Link one library into the module + bool LinkInLibrary ( + const std::string& Library, ///< The library to link in + bool& is_native ///< Indicates if lib a native library + ); + + /// This function links one bitcode archive, \p Filename, into the module. + /// The archive is searched to resolve outstanding symbols. Any modules in + /// the archive that resolve outstanding symbols will be linked in. The + /// library is searched repeatedly until no more modules that resolve + /// symbols can be found. If an error occurs, the error string is set. 
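+  /// A minimal calling sketch (the linker instance and paths are illustrative):
+  ///
+  ///   Linker TheLinker("mytool", "composite");
+  ///   TheLinker.addSystemPaths();
+  ///   bool isNative = false;
+  ///   if (TheLinker.LinkInArchive(sys::Path("/usr/lib/libfoo.a"), isNative))
+  ///     std::cerr << TheLinker.getLastError() << "\n";
+  ///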
+ /// To speed up this function, ensure the the archive has been processed + /// llvm-ranlib or the S option was given to llvm-ar when the archive was + /// created. These tools add a symbol table to the archive which makes the + /// search for undefined symbols much faster. + /// @see getLastError + /// @returns true if an error occurs, otherwise false. + /// @brief Link in one archive. + bool LinkInArchive( + const sys::Path& Filename, ///< Filename of the archive to link + bool& is_native ///< Indicates if archive is a native archive + ); + + /// This method links the \p Src module into the Linker's Composite module + /// by calling LinkModules. All the other LinkIn* methods eventually + /// result in calling this method to link a Module into the Linker's + /// composite. + /// @see LinkModules + /// @returns True if an error occurs, false otherwise. + /// @brief Link in a module. + bool LinkInModule( + Module* Src, ///< Module linked into \p Dest + std::string* ErrorMsg = 0 /// Error/diagnostic string + ) { + return LinkModules(Composite, Src, ErrorMsg ); + } + + /// This is the heart of the linker. This method will take unconditional + /// control of the \p Src module and link it into the \p Dest module. The + /// \p Src module will be destructed or subsumed by this method. In either + /// case it is not usable by the caller after this method is invoked. Only + /// the \p Dest module will remain. The \p Src module is linked into the + /// Linker's composite module such that types, global variables, functions, + /// and etc. are matched and resolved. If an error occurs, this function + /// returns true and ErrorMsg is set to a descriptive message about the + /// error. + /// @returns True if an error occurs, false otherwise. + /// @brief Generically link two modules together. + static bool LinkModules(Module* Dest, Module* Src, std::string* ErrorMsg); + + /// This function looks through the Linker's LibPaths to find a library with + /// the name \p Filename. If the library cannot be found, the returned path + /// will be empty (i.e. sys::Path::isEmpty() will return true). + /// @returns A sys::Path to the found library + /// @brief Find a library from its short name. + sys::Path FindLib(const std::string &Filename); + + /// @} + /// @name Implementation + /// @{ + private: + /// Read in and parse the bitcode file named by FN and return the + /// Module it contains (wrapped in an auto_ptr), or 0 if an error occurs. + std::auto_ptr<Module> LoadObject(const sys::Path& FN); + + bool warning(const std::string& message); + bool error(const std::string& message); + void verbose(const std::string& message); + + /// @} + /// @name Data + /// @{ + private: + Module* Composite; ///< The composite module linked together + std::vector<sys::Path> LibPaths; ///< The library search paths + unsigned Flags; ///< Flags to control optional behavior. + std::string Error; ///< Text of error that occurred. + std::string ProgramName; ///< Name of the program being linked + /// @} + +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/MDNode.h b/include/llvm/MDNode.h new file mode 100644 index 0000000..d632e4e --- /dev/null +++ b/include/llvm/MDNode.h @@ -0,0 +1,135 @@ +//===-- llvm/Metadata.h - Constant class subclass definitions ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +/// @file +/// This file contains the declarations for the subclasses of Constant, +/// which represent the different flavors of constant values that live in LLVM. +/// Note that Constants are immutable (once created they never change) and are +/// fully shared by structural equivalence. This means that two structurally +/// equivalent constants will always have the same address. Constant's are +/// created on demand as needed and never deleted: thus clients don't have to +/// worry about the lifetime of the objects. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_MDNODE_H +#define LLVM_MDNODE_H + +#include "llvm/Constant.h" +#include "llvm/Type.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/ValueHandle.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// MDNode - a tuple of other values. +/// These contain a list of the Constants that represent the metadata. The +/// operand list is always empty, query the element list instead. +/// +/// This class will attempt to keep track of values as they are modified. When +/// a value is replaced the element will be replaced with it, and when the +/// value is deleted the element is set to a null pointer. In order to preserve +/// structural equivalence while the elements mutate, the MDNode may call +/// replaceAllUsesWith on itself. Because of this, users of MDNode must use a +/// WeakVH or CallbackVH to hold the node pointer if there is a chance that one +/// of the elements held by the node may change. +/// +class MDNode : public Constant, public FoldingSetNode { + MDNode(const MDNode &); // DO NOT IMPLEMENT + + friend class ElementVH; + struct ElementVH : public CallbackVH { + MDNode *OwningNode; + + ElementVH(Value *V, MDNode *Parent) + : CallbackVH(V), OwningNode(Parent) {} + + ~ElementVH() {} + + /// deleted - Set this entry in the MDNode to 'null'. This will reallocate + /// the MDNode. + virtual void deleted() { + OwningNode->replaceElement(this->operator Value*(), 0); + } + + /// allUsesReplacedWith - Modify the MDNode by replacing this entry with + /// new_value. This will reallocate the MDNode. + virtual void allUsesReplacedWith(Value *new_value) { + OwningNode->replaceElement(this->operator Value*(), new_value); + } + }; + + void replaceElement(Value *From, Value *To); + + SmallVector<ElementVH, 4> Node; + typedef SmallVectorImpl<ElementVH>::iterator elem_iterator; +protected: + explicit MDNode(Value*const* Vals, unsigned NumVals); +public: + typedef SmallVectorImpl<ElementVH>::const_iterator const_elem_iterator; + + /// get() - Static factory methods - Return objects of the specified value. + /// + static MDNode *get(Value*const* Vals, unsigned NumVals); + + Value *getElement(unsigned i) const { + return Node[i]; + } + + unsigned getNumElements() const { + return Node.size(); + } + + bool elem_empty() const { + return Node.empty(); + } + + const_elem_iterator elem_begin() const { + return Node.begin(); + } + + const_elem_iterator elem_end() const { + return Node.end(); + } + + /// getType() specialization - Type is always MetadataTy. + /// + inline const Type *getType() const { + return Type::MetadataTy; + } + + /// isNullValue - Return true if this is the value that would be returned by + /// getNullValue. This always returns false because getNullValue will never + /// produce metadata. 
+ virtual bool isNullValue() const { + return false; + } + + /// Profile - calculate a unique identifier for this MDNode to collapse + /// duplicates + void Profile(FoldingSetNodeID &ID) const; + + virtual void destroyConstant(); + virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) { + assert(0 && "This should never be called because MDNodes have no ops"); + abort(); + } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const MDNode *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == MDNodeVal; + } +}; + +} // end llvm namespace + +#endif diff --git a/include/llvm/Module.h b/include/llvm/Module.h new file mode 100644 index 0000000..9c8607a --- /dev/null +++ b/include/llvm/Module.h @@ -0,0 +1,423 @@ +//===-- llvm/Module.h - C++ class to represent a VM module ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// @file +/// Module.h This file contains the declarations for the Module class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_MODULE_H +#define LLVM_MODULE_H + +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/GlobalAlias.h" +#include "llvm/Support/DataTypes.h" +#include <vector> + +namespace llvm { + +class GlobalValueRefMap; // Used by ConstantVals.cpp +class FunctionType; + +template<> struct ilist_traits<Function> + : public SymbolTableListTraits<Function, Module> { + + // createSentinel is used to get hold of the node that marks the end of the + // list... (same trick used here as in ilist_traits<Instruction>) + Function *createSentinel() const { + return static_cast<Function*>(&Sentinel); + } + static void destroySentinel(Function*) {} + + Function *provideInitialHead() const { return createSentinel(); } + Function *ensureHead(Function*) const { return createSentinel(); } + static void noteHead(Function*, Function*) {} + +private: + mutable ilist_node<Function> Sentinel; +}; +template<> struct ilist_traits<GlobalVariable> + : public SymbolTableListTraits<GlobalVariable, Module> { + // createSentinel is used to create a node that marks the end of the list. + static GlobalVariable *createSentinel(); + static void destroySentinel(GlobalVariable *GV) { delete GV; } +}; +template<> struct ilist_traits<GlobalAlias> + : public SymbolTableListTraits<GlobalAlias, Module> { + // createSentinel is used to create a node that marks the end of the list. + static GlobalAlias *createSentinel(); + static void destroySentinel(GlobalAlias *GA) { delete GA; } +}; + +/// A Module instance is used to store all the information related to an +/// LLVM module. Modules are the top level container of all other LLVM +/// Intermediate Representation (IR) objects. Each module directly contains a +/// list of globals variables, a list of functions, a list of libraries (or +/// other modules) this module depends on, a symbol table, and various data +/// about the target's characteristics. +/// +/// A module maintains a GlobalValRefMap object that is used to hold all +/// constant references to global variables in the module. When a global +/// variable is destroyed, it should have no entries in the GlobalValueRefMap. +/// @brief The main container class for the LLVM Intermediate Representation. 
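+/// A minimal usage sketch (all names below are illustrative only):
+///
+///   Module *M = new Module("my_module");
+///   M->setTargetTriple("i386-pc-linux-gnu");
+///   M->getOrInsertFunction("foo", Type::Int32Ty, NULL); // declares i32 @foo()
+///   for (Module::iterator F = M->begin(), E = M->end(); F != E; ++F)
+///     ; // visit each Function in the module
+///   delete M;
+///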
+class Module { +/// @name Types And Enumerations +/// @{ +public: + /// The type for the list of global variables. + typedef iplist<GlobalVariable> GlobalListType; + /// The type for the list of functions. + typedef iplist<Function> FunctionListType; + /// The type for the list of aliases. + typedef iplist<GlobalAlias> AliasListType; + + /// The type for the list of dependent libraries. + typedef std::vector<std::string> LibraryListType; + + /// The Global Variable iterator. + typedef GlobalListType::iterator global_iterator; + /// The Global Variable constant iterator. + typedef GlobalListType::const_iterator const_global_iterator; + + /// The Function iterators. + typedef FunctionListType::iterator iterator; + /// The Function constant iterator + typedef FunctionListType::const_iterator const_iterator; + + /// The Global Alias iterators. + typedef AliasListType::iterator alias_iterator; + /// The Global Alias constant iterator + typedef AliasListType::const_iterator const_alias_iterator; + + /// The Library list iterator. + typedef LibraryListType::const_iterator lib_iterator; + + /// An enumeration for describing the endianess of the target machine. + enum Endianness { AnyEndianness, LittleEndian, BigEndian }; + + /// An enumeration for describing the size of a pointer on the target machine. + enum PointerSize { AnyPointerSize, Pointer32, Pointer64 }; + +/// @} +/// @name Member Variables +/// @{ +private: + GlobalListType GlobalList; ///< The Global Variables in the module + FunctionListType FunctionList; ///< The Functions in the module + AliasListType AliasList; ///< The Aliases in the module + LibraryListType LibraryList; ///< The Libraries needed by the module + std::string GlobalScopeAsm; ///< Inline Asm at global scope. + ValueSymbolTable *ValSymTab; ///< Symbol table for values + TypeSymbolTable *TypeSymTab; ///< Symbol table for types + std::string ModuleID; ///< Human readable identifier for the module + std::string TargetTriple; ///< Platform target triple Module compiled on + std::string DataLayout; ///< Target data description + + friend class Constant; + +/// @} +/// @name Constructors +/// @{ +public: + /// The Module constructor. Note that there is no default constructor. You + /// must provide a name for the module upon construction. + explicit Module(const std::string &ModuleID); + /// The module destructor. This will dropAllReferences. + ~Module(); + +/// @} +/// @name Module Level Accessors +/// @{ +public: + /// Get the module identifier which is, essentially, the name of the module. + /// @returns the module identifier as a string + const std::string &getModuleIdentifier() const { return ModuleID; } + + /// Get the data layout string for the module's target platform. This encodes + /// the type sizes and alignments expected by this module. + /// @returns the data layout as a string + const std::string& getDataLayout() const { return DataLayout; } + + /// Get the target triple which is a string describing the target host. + /// @returns a string containing the target triple. + const std::string &getTargetTriple() const { return TargetTriple; } + + /// Get the target endian information. + /// @returns Endianess - an enumeration for the endianess of the target + Endianness getEndianness() const; + + /// Get the target pointer size. + /// @returns PointerSize - an enumeration for the size of the target's pointer + PointerSize getPointerSize() const; + + /// Get any module-scope inline assembly blocks. 
+ /// @returns a string containing the module-scope inline assembly blocks. + const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; } +/// @} +/// @name Module Level Mutators +/// @{ +public: + + /// Set the module identifier. + void setModuleIdentifier(const std::string &ID) { ModuleID = ID; } + + /// Set the data layout + void setDataLayout(const std::string& DL) { DataLayout = DL; } + + /// Set the target triple. + void setTargetTriple(const std::string &T) { TargetTriple = T; } + + /// Set the module-scope inline assembly blocks. + void setModuleInlineAsm(const std::string &Asm) { GlobalScopeAsm = Asm; } + + /// Append to the module-scope inline assembly blocks, automatically + /// appending a newline to the end. + void appendModuleInlineAsm(const std::string &Asm) { + GlobalScopeAsm += Asm; + GlobalScopeAsm += '\n'; + } + +/// @} +/// @name Generic Value Accessors +/// @{ + + /// getNamedValue - Return the first global value in the module with + /// the specified name, of arbitrary type. This method returns null + /// if a global with the specified name is not found. + GlobalValue *getNamedValue(const std::string &Name) const; + GlobalValue *getNamedValue(const char *Name) const; + +/// @} +/// @name Function Accessors +/// @{ +public: + /// getOrInsertFunction - Look up the specified function in the module symbol + /// table. Four possibilities: + /// 1. If it does not exist, add a prototype for the function and return it. + /// 2. If it exists, and has a local linkage, the existing function is + /// renamed and a new one is inserted. + /// 3. Otherwise, if the existing function has the correct prototype, return + /// the existing function. + /// 4. Finally, the function exists but has the wrong prototype: return the + /// function with a constantexpr cast to the right prototype. + Constant *getOrInsertFunction(const std::string &Name, const FunctionType *T, + AttrListPtr AttributeList); + + Constant *getOrInsertFunction(const std::string &Name, const FunctionType *T); + + /// getOrInsertFunction - Look up the specified function in the module symbol + /// table. If it does not exist, add a prototype for the function and return + /// it. This function guarantees to return a constant of pointer to the + /// specified function type or a ConstantExpr BitCast of that type if the + /// named function has a different type. This version of the method takes a + /// null terminated list of function arguments, which makes it easier for + /// clients to use. + Constant *getOrInsertFunction(const std::string &Name, + AttrListPtr AttributeList, + const Type *RetTy, ...) END_WITH_NULL; + + Constant *getOrInsertFunction(const std::string &Name, const Type *RetTy, ...) + END_WITH_NULL; + + Constant *getOrInsertTargetIntrinsic(const std::string &Name, + const FunctionType *Ty, + AttrListPtr AttributeList); + + /// getFunction - Look up the specified function in the module symbol table. + /// If it does not exist, return null. + Function *getFunction(const std::string &Name) const; + Function *getFunction(const char *Name) const; + +/// @} +/// @name Global Variable Accessors +/// @{ +public: + /// getGlobalVariable - Look up the specified global variable in the module + /// symbol table. If it does not exist, return null. If AllowInternal is set + /// to true, this function will return types that have InternalLinkage. By + /// default, these types are not returned. 
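+  /// For example, getGlobalVariable("counter", /*AllowInternal=*/true) will also
+  /// find an internal-linkage global named "counter" (the name is illustrative).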
+ GlobalVariable *getGlobalVariable(const std::string &Name, + bool AllowInternal = false) const; + + /// getNamedGlobal - Return the first global variable in the module with the + /// specified name, of arbitrary type. This method returns null if a global + /// with the specified name is not found. + GlobalVariable *getNamedGlobal(const std::string &Name) const { + return getGlobalVariable(Name, true); + } + + /// getOrInsertGlobal - Look up the specified global in the module symbol + /// table. + /// 1. If it does not exist, add a declaration of the global and return it. + /// 2. Else, the global exists but has the wrong type: return the function + /// with a constantexpr cast to the right type. + /// 3. Finally, if the existing global is the correct delclaration, return + /// the existing global. + Constant *getOrInsertGlobal(const std::string &Name, const Type *Ty); + +/// @} +/// @name Global Alias Accessors +/// @{ +public: + /// getNamedAlias - Return the first global alias in the module with the + /// specified name, of arbitrary type. This method returns null if a global + /// with the specified name is not found. + GlobalAlias *getNamedAlias(const std::string &Name) const; + +/// @} +/// @name Type Accessors +/// @{ +public: + /// addTypeName - Insert an entry in the symbol table mapping Str to Type. If + /// there is already an entry for this name, true is returned and the symbol + /// table is not modified. + bool addTypeName(const std::string &Name, const Type *Ty); + + /// getTypeName - If there is at least one entry in the symbol table for the + /// specified type, return it. + std::string getTypeName(const Type *Ty) const; + + /// getTypeByName - Return the type with the specified name in this module, or + /// null if there is none by that name. + const Type *getTypeByName(const std::string &Name) const; + +/// @} +/// @name Direct access to the globals list, functions list, and symbol table +/// @{ +public: + /// Get the Module's list of global variables (constant). + const GlobalListType &getGlobalList() const { return GlobalList; } + /// Get the Module's list of global variables. + GlobalListType &getGlobalList() { return GlobalList; } + static iplist<GlobalVariable> Module::*getSublistAccess(GlobalVariable*) { + return &Module::GlobalList; + } + /// Get the Module's list of functions (constant). + const FunctionListType &getFunctionList() const { return FunctionList; } + /// Get the Module's list of functions. + FunctionListType &getFunctionList() { return FunctionList; } + static iplist<Function> Module::*getSublistAccess(Function*) { + return &Module::FunctionList; + } + /// Get the Module's list of aliases (constant). + const AliasListType &getAliasList() const { return AliasList; } + /// Get the Module's list of aliases. + AliasListType &getAliasList() { return AliasList; } + static iplist<GlobalAlias> Module::*getSublistAccess(GlobalAlias*) { + return &Module::AliasList; + } + /// Get the symbol table of global variable and function identifiers + const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; } + /// Get the Module's symbol table of global variable and function identifiers. 
+ ValueSymbolTable &getValueSymbolTable() { return *ValSymTab; } + /// Get the symbol table of types + const TypeSymbolTable &getTypeSymbolTable() const { return *TypeSymTab; } + /// Get the Module's symbol table of types + TypeSymbolTable &getTypeSymbolTable() { return *TypeSymTab; } + +/// @} +/// @name Global Variable Iteration +/// @{ +public: + /// Get an iterator to the first global variable + global_iterator global_begin() { return GlobalList.begin(); } + /// Get a constant iterator to the first global variable + const_global_iterator global_begin() const { return GlobalList.begin(); } + /// Get an iterator to the last global variable + global_iterator global_end () { return GlobalList.end(); } + /// Get a constant iterator to the last global variable + const_global_iterator global_end () const { return GlobalList.end(); } + /// Determine if the list of globals is empty. + bool global_empty() const { return GlobalList.empty(); } + +/// @} +/// @name Function Iteration +/// @{ +public: + /// Get an iterator to the first function. + iterator begin() { return FunctionList.begin(); } + /// Get a constant iterator to the first function. + const_iterator begin() const { return FunctionList.begin(); } + /// Get an iterator to the last function. + iterator end () { return FunctionList.end(); } + /// Get a constant iterator to the last function. + const_iterator end () const { return FunctionList.end(); } + /// Determine how many functions are in the Module's list of functions. + size_t size() const { return FunctionList.size(); } + /// Determine if the list of functions is empty. + bool empty() const { return FunctionList.empty(); } + +/// @} +/// @name Dependent Library Iteration +/// @{ +public: + /// @brief Get a constant iterator to beginning of dependent library list. + inline lib_iterator lib_begin() const { return LibraryList.begin(); } + /// @brief Get a constant iterator to end of dependent library list. + inline lib_iterator lib_end() const { return LibraryList.end(); } + /// @brief Returns the number of items in the list of libraries. + inline size_t lib_size() const { return LibraryList.size(); } + /// @brief Add a library to the list of dependent libraries + void addLibrary(const std::string& Lib); + /// @brief Remove a library from the list of dependent libraries + void removeLibrary(const std::string& Lib); + /// @brief Get all the libraries + inline const LibraryListType& getLibraries() const { return LibraryList; } + +/// @} +/// @name Alias Iteration +/// @{ +public: + /// Get an iterator to the first alias. + alias_iterator alias_begin() { return AliasList.begin(); } + /// Get a constant iterator to the first alias. + const_alias_iterator alias_begin() const { return AliasList.begin(); } + /// Get an iterator to the last alias. + alias_iterator alias_end () { return AliasList.end(); } + /// Get a constant iterator to the last alias. + const_alias_iterator alias_end () const { return AliasList.end(); } + /// Determine how many functions are in the Module's list of aliases. + size_t alias_size () const { return AliasList.size(); } + /// Determine if the list of aliases is empty. + bool alias_empty() const { return AliasList.empty(); } + +/// @} +/// @name Utility functions for printing and dumping Module objects +/// @{ +public: + /// Print the module to an output stream with AssemblyAnnotationWriter. 
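A small, hedged sketch of the iteration interface above (assuming a Module &M is in scope; nothing here is part of the header itself), walking the function and global variable lists:

    unsigned NumFuncs = 0, NumGlobals = 0;
    for (Module::iterator F = M.begin(), FE = M.end(); F != FE; ++F)
      ++NumFuncs;                          // visits every Function in the module
    for (Module::global_iterator G = M.global_begin(), GE = M.global_end();
         G != GE; ++G)
      ++NumGlobals;                        // visits every GlobalVariable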
+ void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW) const; + void print(std::ostream &OS, AssemblyAnnotationWriter *AAW) const; + + /// Dump the module to stderr (for debugging). + void dump() const; + /// This function causes all the subinstructions to "let go" of all references + /// that they are maintaining. This allows one to 'delete' a whole class at + /// a time, even though there may be circular references... first all + /// references are dropped, and all use counts go to zero. Then everything + /// is delete'd for real. Note that no operations are valid on an object + /// that has "dropped all references", except operator delete. + void dropAllReferences(); +/// @} +}; + +/// An iostream inserter for modules. +inline std::ostream &operator<<(std::ostream &O, const Module &M) { + M.print(O, 0); + return O; +} +inline raw_ostream &operator<<(raw_ostream &O, const Module &M) { + M.print(O, 0); + return O; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/ModuleProvider.h b/include/llvm/ModuleProvider.h new file mode 100644 index 0000000..8a0a20c --- /dev/null +++ b/include/llvm/ModuleProvider.h @@ -0,0 +1,88 @@ +//===-- llvm/ModuleProvider.h - Interface for module providers --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides an abstract interface for loading a module from some +// place. This interface allows incremental or random access loading of +// functions from the file. This is useful for applications like JIT compilers +// or interprocedural optimizers that do not need the entire program in memory +// at the same time. +// +//===----------------------------------------------------------------------===// + +#ifndef MODULEPROVIDER_H +#define MODULEPROVIDER_H + +#include <string> + +namespace llvm { + +class Function; +class Module; + +class ModuleProvider { +protected: + Module *TheModule; + ModuleProvider(); + +public: + virtual ~ModuleProvider(); + + /// getModule - returns the module this provider is encapsulating. + /// + Module* getModule() { return TheModule; } + + /// materializeFunction - make sure the given function is fully read. If the + /// module is corrupt, this returns true and fills in the optional string + /// with information about the problem. If successful, this returns false. + /// + virtual bool materializeFunction(Function *F, std::string *ErrInfo = 0) = 0; + + /// dematerializeFunction - If the given function is read in, and if the + /// module provider supports it, release the memory for the function, and set + /// it up to be materialized lazily. If the provider doesn't support this + /// capability, this method is a noop. + /// + virtual void dematerializeFunction(Function *) {} + + /// materializeModule - make sure the entire Module has been completely read. + /// On error, return null and fill in the error string if specified. + /// + virtual Module* materializeModule(std::string *ErrInfo = 0) = 0; + + /// releaseModule - no longer delete the Module* when provider is destroyed. + /// On error, return null and fill in the error string if specified. 
+ /// + virtual Module* releaseModule(std::string *ErrInfo = 0) { + // Since we're losing control of this Module, we must hand it back complete + if (!materializeModule(ErrInfo)) + return 0; + Module *tempM = TheModule; + TheModule = 0; + return tempM; + } +}; + + +/// ExistingModuleProvider - Allow conversion from a fully materialized Module +/// into a ModuleProvider, allowing code that expects a ModuleProvider to work +/// if we just have a Module. Note that the ModuleProvider takes ownership of +/// the Module specified. +struct ExistingModuleProvider : public ModuleProvider { + explicit ExistingModuleProvider(Module *M) { + TheModule = M; + } + bool materializeFunction(Function *, std::string * = 0) { + return false; + } + Module* materializeModule(std::string * = 0) { return TheModule; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/OperandTraits.h b/include/llvm/OperandTraits.h new file mode 100644 index 0000000..83c1025 --- /dev/null +++ b/include/llvm/OperandTraits.h @@ -0,0 +1,207 @@ +//===-- llvm/OperandTraits.h - OperandTraits class definition ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the traits classes that are handy for enforcing the correct +// layout of various User subclasses. It also provides the means for accessing +// the operands in the most efficient manner. +// + +#ifndef LLVM_OPERAND_TRAITS_H +#define LLVM_OPERAND_TRAITS_H + +#include "llvm/User.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +// FixedNumOperands Trait Class +//===----------------------------------------------------------------------===// + +/// FixedNumOperandTraits - determine the allocation regime of the Use array +/// when it is a prefix to the User object, and the number of Use objects is +/// known at compile time. + +template <unsigned ARITY> +struct FixedNumOperandTraits { + static Use *op_begin(User* U) { + return reinterpret_cast<Use*>(U) - ARITY; + } + static Use *op_end(User* U) { + return reinterpret_cast<Use*>(U); + } + static unsigned operands(const User*) { + return ARITY; + } + struct prefix { + Use Ops[ARITY]; + prefix(); // DO NOT IMPLEMENT + }; + template <class U> + struct Layout { + struct overlay : prefix, U { + overlay(); // DO NOT IMPLEMENT + }; + }; + static inline void *allocate(unsigned); // FIXME +}; + +//===----------------------------------------------------------------------===// +// OptionalOperands Trait Class +//===----------------------------------------------------------------------===// + +template <unsigned ARITY = 1> +struct OptionalOperandTraits : FixedNumOperandTraits<ARITY> { + static unsigned operands(const User *U) { + return U->getNumOperands(); + } +}; + +//===----------------------------------------------------------------------===// +// VariadicOperand Trait Class +//===----------------------------------------------------------------------===// + +/// VariadicOperandTraits - determine the allocation regime of the Use array +/// when it is a prefix to the User object, and the number of Use objects is +/// only known at allocation time. 
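A minimal sketch of the adapter above (assuming a Module *M that the caller currently owns): the provider takes ownership of M, and releaseModule() hands it back before the provider is destroyed.

    ModuleProvider *MP = new ExistingModuleProvider(M); // MP now owns M
    // ... hand MP to code that expects a ModuleProvider (e.g. a JIT) ...
    std::string Err;
    Module *Same = MP->releaseModule(&Err);  // reclaim ownership of M
    delete MP;                               // safe: MP no longer owns the module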
+ +template <unsigned MINARITY = 0> +struct VariadicOperandTraits { + static Use *op_begin(User* U) { + return reinterpret_cast<Use*>(U) - U->getNumOperands(); + } + static Use *op_end(User* U) { + return reinterpret_cast<Use*>(U); + } + static unsigned operands(const User *U) { + return U->getNumOperands(); + } + static inline void *allocate(unsigned); // FIXME +}; + +//===----------------------------------------------------------------------===// +// HungoffOperand Trait Class +//===----------------------------------------------------------------------===// + +/// HungoffOperandTraits - determine the allocation regime of the Use array +/// when it is not a prefix to the User object, but allocated at an unrelated +/// heap address. +/// Assumes that the User subclass that is determined by this traits class +/// has an OperandList member of type User::op_iterator. [Note: this is now +/// trivially satisfied, because User has that member for historic reasons.] +/// +/// This is the traits class that is needed when the Use array must be +/// resizable. + +template <unsigned MINARITY = 1> +struct HungoffOperandTraits { + static Use *op_begin(User* U) { + return U->OperandList; + } + static Use *op_end(User* U) { + return U->OperandList + U->getNumOperands(); + } + static unsigned operands(const User *U) { + return U->getNumOperands(); + } + static inline void *allocate(unsigned); // FIXME +}; + +/// Macro for generating in-class operand accessor declarations. +/// It should only be called in the public section of the interface. +/// +#define DECLARE_TRANSPARENT_OPERAND_ACCESSORS(VALUECLASS) \ + public: \ + inline VALUECLASS *getOperand(unsigned) const; \ + inline void setOperand(unsigned, VALUECLASS*); \ + inline op_iterator op_begin(); \ + inline const_op_iterator op_begin() const; \ + inline op_iterator op_end(); \ + inline const_op_iterator op_end() const; \ + protected: \ + template <int> inline Use &Op(); \ + template <int> inline const Use &Op() const; \ + public: \ + inline unsigned getNumOperands() const + +/// Macro for generating out-of-class operand accessor definitions +#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS) \ +CLASS::op_iterator CLASS::op_begin() { \ + return OperandTraits<CLASS>::op_begin(this); \ +} \ +CLASS::const_op_iterator CLASS::op_begin() const { \ + return OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this)); \ +} \ +CLASS::op_iterator CLASS::op_end() { \ + return OperandTraits<CLASS>::op_end(this); \ +} \ +CLASS::const_op_iterator CLASS::op_end() const { \ + return OperandTraits<CLASS>::op_end(const_cast<CLASS*>(this)); \ +} \ +VALUECLASS *CLASS::getOperand(unsigned i_nocapture) const { \ + assert(i_nocapture < OperandTraits<CLASS>::operands(this) \ + && "getOperand() out of range!"); \ + return static_cast<VALUECLASS*>( \ + OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this))[i_nocapture]); \ +} \ +void CLASS::setOperand(unsigned i_nocapture, VALUECLASS *Val_nocapture) { \ + assert(i_nocapture < OperandTraits<CLASS>::operands(this) \ + && "setOperand() out of range!"); \ + OperandTraits<CLASS>::op_begin(this)[i_nocapture] = Val_nocapture; \ +} \ +unsigned CLASS::getNumOperands() const { \ + return OperandTraits<CLASS>::operands(this); \ +} \ +template <int Idx_nocapture> Use &CLASS::Op() { \ + return this->OpFrom<Idx_nocapture>(this); \ +} \ +template <int Idx_nocapture> const Use &CLASS::Op() const { \ + return this->OpFrom<Idx_nocapture>(this); \ +} + + +/// Macro for generating out-of-class operand accessor +/// definitions with 
casted result +#define DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(CLASS, VALUECLASS) \ +CLASS::op_iterator CLASS::op_begin() { \ + return OperandTraits<CLASS>::op_begin(this); \ +} \ +CLASS::const_op_iterator CLASS::op_begin() const { \ + return OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this)); \ +} \ +CLASS::op_iterator CLASS::op_end() { \ + return OperandTraits<CLASS>::op_end(this); \ +} \ +CLASS::const_op_iterator CLASS::op_end() const { \ + return OperandTraits<CLASS>::op_end(const_cast<CLASS*>(this)); \ +} \ +VALUECLASS *CLASS::getOperand(unsigned i_nocapture) const { \ + assert(i_nocapture < OperandTraits<CLASS>::operands(this) \ + && "getOperand() out of range!"); \ + return cast<VALUECLASS>( \ + OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this))[i_nocapture]); \ +} \ +void CLASS::setOperand(unsigned i_nocapture, VALUECLASS *Val_nocapture) { \ + assert(i_nocapture < OperandTraits<CLASS>::operands(this) \ + && "setOperand() out of range!"); \ + OperandTraits<CLASS>::op_begin(this)[i_nocapture] = Val_nocapture; \ +} \ +unsigned CLASS::getNumOperands() const { \ + return OperandTraits<CLASS>::operands(this); \ +} \ +template <int Idx_nocapture> Use &CLASS::Op() { \ + return this->OpFrom<Idx_nocapture>(this); \ +} \ +template <int Idx_nocapture> const Use &CLASS::Op() const { \ + return this->OpFrom<Idx_nocapture>(this); \ +} + + +} // End llvm namespace + +#endif diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h new file mode 100644 index 0000000..923de65 --- /dev/null +++ b/include/llvm/Pass.h @@ -0,0 +1,380 @@ +//===- llvm/Pass.h - Base class for Passes ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a base class that indicates that a specified class is a +// transformation pass implementation. +// +// Passes are designed this way so that it is possible to run passes in a cache +// and organizationally optimal order without having to specify it at the front +// end. This allows arbitrary passes to be strung together and have them +// executed as effeciently as possible. +// +// Passes should extend one of the classes below, depending on the guarantees +// that it can make about what will be modified as it is run. For example, most +// global optimizations should derive from FunctionPass, because they do not add +// or delete functions, they operate on the internals of the function. +// +// Note that this file #includes PassSupport.h and PassAnalysisSupport.h (at the +// bottom), so the APIs exposed by these files are also automatically available +// to all users of this file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_PASS_H +#define LLVM_PASS_H + +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/Streams.h" +#include <cassert> +#include <iosfwd> +#include <utility> +#include <vector> + +namespace llvm { + +class BasicBlock; +class Function; +class Module; +class AnalysisUsage; +class PassInfo; +class ImmutablePass; +class PMStack; +class AnalysisResolver; +class PMDataManager; + +// AnalysisID - Use the PassInfo to identify a pass... +typedef const PassInfo* AnalysisID; + +/// Different types of internal pass managers. External pass managers +/// (PassManager and FunctionPassManager) are not represented here. 
+/// Ordering of pass manager types is important here.
+enum PassManagerType {
+  PMT_Unknown = 0,
+  PMT_ModulePassManager = 1, /// MPPassManager
+  PMT_CallGraphPassManager,  /// CGPassManager
+  PMT_FunctionPassManager,   /// FPPassManager
+  PMT_LoopPassManager,       /// LPPassManager
+  PMT_BasicBlockPassManager, /// BBPassManager
+  PMT_Last
+};
+
+//===----------------------------------------------------------------------===//
+/// Pass interface - Implemented by all 'passes'.  Subclass this if you are an
+/// interprocedural optimization or you do not fit into any of the more
+/// constrained passes described below.
+///
+class Pass {
+  AnalysisResolver *Resolver;  // Used to resolve analysis
+  intptr_t PassID;
+
+  void operator=(const Pass&);  // DO NOT IMPLEMENT
+  Pass(const Pass &);           // DO NOT IMPLEMENT
+public:
+  explicit Pass(intptr_t pid) : Resolver(0), PassID(pid) {
+    assert(pid && "pid cannot be 0");
+  }
+  explicit Pass(const void *pid) : Resolver(0), PassID((intptr_t)pid) {
+    assert(pid && "pid cannot be 0");
+  }
+  virtual ~Pass();
+
+  /// getPassName - Return a nice clean name for a pass.  This is usually
+  /// implemented in terms of the name that is registered by one of the
+  /// Registration templates, but can be overloaded directly.
+  ///
+  virtual const char *getPassName() const;
+
+  /// getPassInfo - Return the PassInfo data structure that corresponds to this
+  /// pass...  If the pass has not been registered, this will return null.
+  ///
+  const PassInfo *getPassInfo() const;
+
+  /// print - Print out the internal state of the pass.  This is called by
+  /// Analyze to print out the contents of an analysis.  Otherwise it is not
+  /// necessary to implement this method.  Beware that the module pointer MAY be
+  /// null.  This automatically forwards to a virtual function that does not
+  /// provide the Module*, so if the analysis doesn't need it, it can just be
+  /// ignored.
+  ///
+  virtual void print(std::ostream &O, const Module *M) const;
+  void print(std::ostream *O, const Module *M) const { if (O) print(*O, M); }
+  void dump() const; // dump - call print(std::cerr, 0);
+
+  /// Each pass is responsible for assigning a pass manager to itself.
+  /// PMS is the stack of available pass managers.
+  virtual void assignPassManager(PMStack &,
+                                 PassManagerType = PMT_Unknown) {}
+  /// Check if available pass managers are suitable for this pass or not.
+  virtual void preparePassManager(PMStack &) {}
+
+  /// Return what kind of Pass Manager can manage this pass.
+  virtual PassManagerType getPotentialPassManagerType() const {
+    return PMT_Unknown;
+  }
+
+  // Access AnalysisResolver
+  inline void setResolver(AnalysisResolver *AR) {
+    assert(!Resolver && "Resolver is already set");
+    Resolver = AR;
+  }
+  inline AnalysisResolver *getResolver() {
+    return Resolver;
+  }
+
+  /// getAnalysisUsage - This function should be overridden by passes that need
+  /// analysis information to do their job.  If a pass specifies to this
+  /// function that it uses a particular analysis result, it can then use the
+  /// getAnalysis<AnalysisType>() function, below.
+  ///
+  virtual void getAnalysisUsage(AnalysisUsage &) const {
+    // By default, no analysis results are used, all are invalidated.
+  }
+
+  /// releaseMemory() - This member can be implemented by a pass if it wants to
+  /// be able to release its memory when it is no longer needed.  The default
+  /// behavior of passes is to hold onto memory for the entire duration of their
+  /// lifetime (which is the entire compile time).
For pipelined passes, this + /// is not a big deal because that memory gets recycled every time the pass is + /// invoked on another program unit. For IP passes, it is more important to + /// free memory when it is unused. + /// + /// Optionally implement this function to release pass memory when it is no + /// longer used. + /// + virtual void releaseMemory() {} + + /// verifyAnalysis() - This member can be implemented by a analysis pass to + /// check state of analysis information. + virtual void verifyAnalysis() const {} + + // dumpPassStructure - Implement the -debug-passes=PassStructure option + virtual void dumpPassStructure(unsigned Offset = 0); + + template<typename AnalysisClass> + static const PassInfo *getClassPassInfo() { + return lookupPassInfo(intptr_t(&AnalysisClass::ID)); + } + + // lookupPassInfo - Return the pass info object for the specified pass class, + // or null if it is not known. + static const PassInfo *lookupPassInfo(intptr_t TI); + + /// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to + /// get analysis information that might be around, for example to update it. + /// This is different than getAnalysis in that it can fail (if the analysis + /// results haven't been computed), so should only be used if you can handle + /// the case when the analysis is not available. This method is often used by + /// transformation APIs to update analysis results for a pass automatically as + /// the transform is performed. + /// + template<typename AnalysisType> AnalysisType * + getAnalysisIfAvailable() const; // Defined in PassAnalysisSupport.h + + /// mustPreserveAnalysisID - This method serves the same function as + /// getAnalysisIfAvailable, but works if you just have an AnalysisID. This + /// obviously cannot give you a properly typed instance of the class if you + /// don't have the class name available (use getAnalysisIfAvailable if you + /// do), but it can tell you if you need to preserve the pass at least. + /// + bool mustPreserveAnalysisID(const PassInfo *AnalysisID) const; + + /// getAnalysis<AnalysisType>() - This function is used by subclasses to get + /// to the analysis information that they claim to use by overriding the + /// getAnalysisUsage function. + /// + template<typename AnalysisType> + AnalysisType &getAnalysis() const; // Defined in PassAnalysisSupport.h + + template<typename AnalysisType> + AnalysisType &getAnalysis(Function &F); // Defined in PassanalysisSupport.h + + template<typename AnalysisType> + AnalysisType &getAnalysisID(const PassInfo *PI) const; + + template<typename AnalysisType> + AnalysisType &getAnalysisID(const PassInfo *PI, Function &F); +}; + +inline std::ostream &operator<<(std::ostream &OS, const Pass &P) { + P.print(OS, 0); return OS; +} + +//===----------------------------------------------------------------------===// +/// ModulePass class - This class is used to implement unstructured +/// interprocedural optimizations and analyses. ModulePasses may do anything +/// they want to the program. +/// +class ModulePass : public Pass { +public: + /// runOnModule - Virtual method overriden by subclasses to process the module + /// being operated on. + virtual bool runOnModule(Module &M) = 0; + + virtual void assignPassManager(PMStack &PMS, + PassManagerType T = PMT_ModulePassManager); + + /// Return what kind of Pass Manager can manage this pass. 
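As a hedged sketch of the subclassing pattern described above (the class name and behavior are illustrative only, not part of this header), a do-nothing ModulePass looks roughly like:

    namespace {
      struct DoNothing : public ModulePass {   // hypothetical example pass
        static char ID;
        DoNothing() : ModulePass(&ID) {}
        virtual bool runOnModule(Module &M) {
          return false;                        // the module was not changed
        }
      };
    }
    char DoNothing::ID = 0;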
+  virtual PassManagerType getPotentialPassManagerType() const {
+    return PMT_ModulePassManager;
+  }
+
+  explicit ModulePass(intptr_t pid) : Pass(pid) {}
+  explicit ModulePass(const void *pid) : Pass(pid) {}
+  // Force out-of-line virtual method.
+  virtual ~ModulePass();
+};
+
+
+//===----------------------------------------------------------------------===//
+/// ImmutablePass class - This class is used to provide information that does
+/// not need to be run.  This is useful for things like target information and
+/// "basic" versions of AnalysisGroups.
+///
+class ImmutablePass : public ModulePass {
+public:
+  /// initializePass - This method may be overridden by immutable passes to
+  /// allow them to perform various initialization actions they require.  This
+  /// is primarily because an ImmutablePass can "require" another ImmutablePass,
+  /// and if it does, the overridden version of initializePass may get access to
+  /// these passes with getAnalysis<>.
+  ///
+  virtual void initializePass() {}
+
+  /// ImmutablePasses are never run.
+  ///
+  bool runOnModule(Module &) { return false; }
+
+  explicit ImmutablePass(intptr_t pid) : ModulePass(pid) {}
+  explicit ImmutablePass(const void *pid)
+  : ModulePass(pid) {}
+
+  // Force out-of-line virtual method.
+  virtual ~ImmutablePass();
+};
+
+//===----------------------------------------------------------------------===//
+/// FunctionPass class - This class is used to implement most global
+/// optimizations.  Optimizations should subclass this class if they meet the
+/// following constraints:
+///
+///  1. Optimizations are organized globally, i.e., a function at a time
+///  2. Optimizing a function does not cause the addition or removal of any
+///     functions in the module
+///
+class FunctionPass : public Pass {
+public:
+  explicit FunctionPass(intptr_t pid) : Pass(pid) {}
+  explicit FunctionPass(const void *pid) : Pass(pid) {}
+
+  /// doInitialization - Virtual method overridden by subclasses to do
+  /// any necessary per-module initialization.
+  ///
+  virtual bool doInitialization(Module &) { return false; }
+
+  /// runOnFunction - Virtual method overridden by subclasses to do the
+  /// per-function processing of the pass.
+  ///
+  virtual bool runOnFunction(Function &F) = 0;
+
+  /// doFinalization - Virtual method overridden by subclasses to do any post
+  /// processing needed after all passes have run.
+  ///
+  virtual bool doFinalization(Module &) { return false; }
+
+  /// runOnModule - On a module, we run this pass by initializing,
+  /// runOnFunction'ing once for every function in the module, then by
+  /// finalizing.
+  ///
+  virtual bool runOnModule(Module &M);
+
+  /// run - On a function, we simply initialize, run the function, then
+  /// finalize.
+  ///
+  bool run(Function &F);
+
+  virtual void assignPassManager(PMStack &PMS,
+                                 PassManagerType T = PMT_FunctionPassManager);
+
+  /// Return what kind of Pass Manager can manage this pass.
+  virtual PassManagerType getPotentialPassManagerType() const {
+    return PMT_FunctionPassManager;
+  }
+};
+
+
+//===----------------------------------------------------------------------===//
+/// BasicBlockPass class - This class is used to implement most local
+/// optimizations.  Optimizations should subclass this class if they
+/// meet the following constraints:
+///   1. Optimizations are local, operating on either a basic block or
+///      instruction at a time.
+///   2. Optimizations do not modify the CFG of the contained function, or any
+///      other basic block in the function.
+///   3.
Optimizations conform to all of the constraints of FunctionPasses.
+///
+class BasicBlockPass : public Pass {
+public:
+  explicit BasicBlockPass(intptr_t pid) : Pass(pid) {}
+  explicit BasicBlockPass(const void *pid) : Pass(pid) {}
+
+  /// doInitialization - Virtual method overridden by subclasses to do
+  /// any necessary per-module initialization.
+  ///
+  virtual bool doInitialization(Module &) { return false; }
+
+  /// doInitialization - Virtual method overridden by BasicBlockPass subclasses
+  /// to do any necessary per-function initialization.
+  ///
+  virtual bool doInitialization(Function &) { return false; }
+
+  /// runOnBasicBlock - Virtual method overridden by subclasses to do the
+  /// per-basicblock processing of the pass.
+  ///
+  virtual bool runOnBasicBlock(BasicBlock &BB) = 0;
+
+  /// doFinalization - Virtual method overridden by BasicBlockPass subclasses to
+  /// do any post processing needed after all passes have run.
+  ///
+  virtual bool doFinalization(Function &) { return false; }
+
+  /// doFinalization - Virtual method overridden by subclasses to do any post
+  /// processing needed after all passes have run.
+  ///
+  virtual bool doFinalization(Module &) { return false; }
+
+  // To run this pass on a function, we simply call runOnBasicBlock once for
+  // each basic block in the function.
+  //
+  bool runOnFunction(Function &F);
+
+  virtual void assignPassManager(PMStack &PMS,
+                                 PassManagerType T = PMT_BasicBlockPassManager);
+
+  /// Return what kind of Pass Manager can manage this pass.
+  virtual PassManagerType getPotentialPassManagerType() const {
+    return PMT_BasicBlockPassManager;
+  }
+};
+
+/// If the user specifies the -time-passes argument on an LLVM tool command line
+/// then the value of this boolean will be true, otherwise false.
+/// @brief This is the storage for the -time-passes option.
+extern bool TimePassesIsEnabled;
+
+} // End llvm namespace
+
+// Include support files that contain important APIs commonly used by Passes,
+// but that we want to separate out to make it easier to read the header files.
+//
+#include "llvm/PassSupport.h"
+#include "llvm/PassAnalysisSupport.h"
+
+#endif
diff --git a/include/llvm/PassAnalysisSupport.h b/include/llvm/PassAnalysisSupport.h
new file mode 100644
index 0000000..b09ba45
--- /dev/null
+++ b/include/llvm/PassAnalysisSupport.h
@@ -0,0 +1,248 @@
+//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines stuff that is used to define and "use" Analysis Passes.
+// This file is automatically #included by Pass.h, so:
+//
+//                NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
+//
+// Instead, #include Pass.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASS_ANALYSIS_SUPPORT_H
+#define LLVM_PASS_ANALYSIS_SUPPORT_H
+
+#include <vector>
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+// No need to include Pass.h, we are being included by it!
+
+//===----------------------------------------------------------------------===//
+// AnalysisUsage - Represent the analysis usage information of a pass.
This +// tracks analyses that the pass REQUIRES (must be available when the pass +// runs), REQUIRES TRANSITIVE (must be available throughout the lifetime of the +// pass), and analyses that the pass PRESERVES (the pass does not invalidate the +// results of these analyses). This information is provided by a pass to the +// Pass infrastructure through the getAnalysisUsage virtual function. +// +class AnalysisUsage { +public: + typedef SmallVector<AnalysisID, 32> VectorType; + +private: + // Sets of analyses required and preserved by a pass + VectorType Required, RequiredTransitive, Preserved; + bool PreservesAll; + +public: + AnalysisUsage() : PreservesAll(false) {} + + // addRequired - Add the specified ID to the required set of the usage info + // for a pass. + // + AnalysisUsage &addRequiredID(AnalysisID ID) { + assert(ID && "Pass class not registered!"); + Required.push_back(ID); + return *this; + } + template<class PassClass> + AnalysisUsage &addRequired() { + return addRequiredID(Pass::getClassPassInfo<PassClass>()); + } + + AnalysisUsage &addRequiredTransitiveID(AnalysisID ID) { + assert(ID && "Pass class not registered!"); + Required.push_back(ID); + RequiredTransitive.push_back(ID); + return *this; + } + template<class PassClass> + AnalysisUsage &addRequiredTransitive() { + AnalysisID ID = Pass::getClassPassInfo<PassClass>(); + return addRequiredTransitiveID(ID); + } + + // addPreserved - Add the specified ID to the set of analyses preserved by + // this pass + // + AnalysisUsage &addPreservedID(AnalysisID ID) { + Preserved.push_back(ID); + return *this; + } + + template<class PassClass> + AnalysisUsage &addPreserved() { + assert(Pass::getClassPassInfo<PassClass>() && "Pass class not registered!"); + Preserved.push_back(Pass::getClassPassInfo<PassClass>()); + return *this; + } + + // setPreservesAll - Set by analyses that do not transform their input at all + void setPreservesAll() { PreservesAll = true; } + bool getPreservesAll() const { return PreservesAll; } + + /// setPreservesCFG - This function should be called by the pass, iff they do + /// not: + /// + /// 1. Add or remove basic blocks from the function + /// 2. Modify terminator instructions in any way. + /// + /// This function annotates the AnalysisUsage info object to say that analyses + /// that only depend on the CFG are preserved by this pass. + /// + void setPreservesCFG(); + + const VectorType &getRequiredSet() const { return Required; } + const VectorType &getRequiredTransitiveSet() const { + return RequiredTransitive; + } + const VectorType &getPreservedSet() const { return Preserved; } +}; + +//===----------------------------------------------------------------------===// +// AnalysisResolver - Simple interface used by Pass objects to pull all +// analysis information out of pass manager that is responsible to manage +// the pass. +// +class PMDataManager; +class AnalysisResolver { +private: + AnalysisResolver(); // DO NOT IMPLEMENT + +public: + explicit AnalysisResolver(PMDataManager &P) : PM(P) { } + + inline PMDataManager &getPMDataManager() { return PM; } + + // Find pass that is implementing PI. + Pass *findImplPass(const PassInfo *PI) { + Pass *ResultPass = 0; + for (unsigned i = 0; i < AnalysisImpls.size() ; ++i) { + if (AnalysisImpls[i].first == PI) { + ResultPass = AnalysisImpls[i].second; + break; + } + } + return ResultPass; + } + + // Find pass that is implementing PI. Initialize pass for Function F. 
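To illustrate how a pass typically fills in this object, here is a hedged sketch of a getAnalysisUsage override; `SomeAnalysis` is a stand-in for a real analysis pass class, not something declared in this header:

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<SomeAnalysis>();   // must be computed before this pass runs
      AU.addPreserved<SomeAnalysis>();  // this pass does not invalidate its results
      AU.setPreservesCFG();             // no blocks added/removed, no terminators changed
    }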
+ Pass *findImplPass(Pass *P, const PassInfo *PI, Function &F); + + void addAnalysisImplsPair(const PassInfo *PI, Pass *P) { + std::pair<const PassInfo*, Pass*> pir = std::make_pair(PI,P); + AnalysisImpls.push_back(pir); + } + + /// clearAnalysisImpls - Clear cache that is used to connect a pass to the + /// the analysis (PassInfo). + void clearAnalysisImpls() { + AnalysisImpls.clear(); + } + + // getAnalysisIfAvailable - Return analysis result or null if it doesn't exist + Pass *getAnalysisIfAvailable(AnalysisID ID, bool Direction) const; + + // AnalysisImpls - This keeps track of which passes implements the interfaces + // that are required by the current pass (to implement getAnalysis()). + std::vector<std::pair<const PassInfo*, Pass*> > AnalysisImpls; + +private: + // PassManager that is used to resolve analysis info + PMDataManager &PM; +}; + +/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to +/// get analysis information that might be around, for example to update it. +/// This is different than getAnalysis in that it can fail (if the analysis +/// results haven't been computed), so should only be used if you can handle +/// the case when the analysis is not available. This method is often used by +/// transformation APIs to update analysis results for a pass automatically as +/// the transform is performed. +/// +template<typename AnalysisType> +AnalysisType *Pass::getAnalysisIfAvailable() const { + assert(Resolver && "Pass not resident in a PassManager object!"); + + const PassInfo *PI = getClassPassInfo<AnalysisType>(); + if (PI == 0) return 0; + return dynamic_cast<AnalysisType*> + (Resolver->getAnalysisIfAvailable(PI, true)); +} + +/// getAnalysis<AnalysisType>() - This function is used by subclasses to get +/// to the analysis information that they claim to use by overriding the +/// getAnalysisUsage function. +/// +template<typename AnalysisType> +AnalysisType &Pass::getAnalysis() const { + assert(Resolver &&"Pass has not been inserted into a PassManager object!"); + + return getAnalysisID<AnalysisType>(getClassPassInfo<AnalysisType>()); +} + +template<typename AnalysisType> +AnalysisType &Pass::getAnalysisID(const PassInfo *PI) const { + assert(PI && "getAnalysis for unregistered pass!"); + assert(Resolver&&"Pass has not been inserted into a PassManager object!"); + // PI *must* appear in AnalysisImpls. Because the number of passes used + // should be a small number, we just do a linear search over a (dense) + // vector. + Pass *ResultPass = Resolver->findImplPass(PI); + assert (ResultPass && + "getAnalysis*() called on an analysis that was not " + "'required' by pass!"); + + // Because the AnalysisType may not be a subclass of pass (for + // AnalysisGroups), we must use dynamic_cast here to potentially adjust the + // return pointer (because the class may multiply inherit, once from pass, + // once from AnalysisType). + // + AnalysisType *Result = dynamic_cast<AnalysisType*>(ResultPass); + assert(Result && "Pass does not implement interface required!"); + return *Result; +} + +/// getAnalysis<AnalysisType>() - This function is used by subclasses to get +/// to the analysis information that they claim to use by overriding the +/// getAnalysisUsage function. 
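A small sketch of the difference between the two accessors described above (again with a hypothetical `SomeAnalysis`): getAnalysis asserts if the result was never required and computed, while getAnalysisIfAvailable simply returns null.

    SomeAnalysis &SA = getAnalysis<SomeAnalysis>();   // must have been addRequired'd
    if (SomeAnalysis *Old = getAnalysisIfAvailable<SomeAnalysis>())
      (void)Old;   // only non-null if an earlier pass computed it and it is still alive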
+/// +template<typename AnalysisType> +AnalysisType &Pass::getAnalysis(Function &F) { + assert(Resolver &&"Pass has not been inserted into a PassManager object!"); + + return getAnalysisID<AnalysisType>(getClassPassInfo<AnalysisType>(), F); +} + +template<typename AnalysisType> +AnalysisType &Pass::getAnalysisID(const PassInfo *PI, Function &F) { + assert(PI && "getAnalysis for unregistered pass!"); + assert(Resolver && "Pass has not been inserted into a PassManager object!"); + // PI *must* appear in AnalysisImpls. Because the number of passes used + // should be a small number, we just do a linear search over a (dense) + // vector. + Pass *ResultPass = Resolver->findImplPass(this, PI, F); + assert (ResultPass && "Unable to find requested analysis info"); + + // Because the AnalysisType may not be a subclass of pass (for + // AnalysisGroups), we must use dynamic_cast here to potentially adjust the + // return pointer (because the class may multiply inherit, once from pass, + // once from AnalysisType). + // + AnalysisType *Result = dynamic_cast<AnalysisType*>(ResultPass); + assert(Result && "Pass does not implement interface required!"); + return *Result; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/PassManager.h b/include/llvm/PassManager.h new file mode 100644 index 0000000..a6703fd --- /dev/null +++ b/include/llvm/PassManager.h @@ -0,0 +1,112 @@ +//===- llvm/PassManager.h - Container for Passes ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PassManager class. This class is used to hold, +// maintain, and optimize execution of Passes. The PassManager class ensures +// that analysis results are available before a pass runs, and that Pass's are +// destroyed when the PassManager is destroyed. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_PASSMANAGER_H +#define LLVM_PASSMANAGER_H + +#include "llvm/Pass.h" + +namespace llvm { + +class Pass; +class ModulePass; +class Module; +class ModuleProvider; + +class PassManagerImpl; +class FunctionPassManagerImpl; + +/// PassManagerBase - An abstract interface to allow code to add passes to +/// a pass manager without having to hard-code what kind of pass manager +/// it is. +class PassManagerBase { +public: + virtual ~PassManagerBase(); + + /// add - Add a pass to the queue of passes to run. This passes ownership of + /// the Pass to the PassManager. When the PassManager is destroyed, the pass + /// will be destroyed as well, so there is no need to delete the pass. This + /// implies that all passes MUST be allocated with 'new'. + virtual void add(Pass *P) = 0; +}; + +/// PassManager manages ModulePassManagers +class PassManager : public PassManagerBase { +public: + + PassManager(); + ~PassManager(); + + /// add - Add a pass to the queue of passes to run. This passes ownership of + /// the Pass to the PassManager. When the PassManager is destroyed, the pass + /// will be destroyed as well, so there is no need to delete the pass. This + /// implies that all passes MUST be allocated with 'new'. + void add(Pass *P); + + /// run - Execute all of the passes scheduled for execution. Keep track of + /// whether any of the passes modifies the module, and if so, return true. 
+  bool run(Module &M);
+
+private:
+
+  /// PassManagerImpl is the actual class.  PassManager is just the
+  /// wrapper that publishes the simple pass manager interface.
+  PassManagerImpl *PM;
+};
+
+/// FunctionPassManager manages FunctionPasses and BasicBlockPassManagers.
+class FunctionPassManager : public PassManagerBase {
+public:
+  /// FunctionPassManager ctor - This initializes the pass manager.  It needs,
+  /// but does not take ownership of, the specified module provider.
+  explicit FunctionPassManager(ModuleProvider *P);
+  ~FunctionPassManager();
+
+  /// add - Add a pass to the queue of passes to run.  This passes
+  /// ownership of the Pass to the PassManager.  When the
+  /// FunctionPassManager is destroyed, the pass will be destroyed as well, so
+  /// there is no need to delete the pass. (TODO delete passes.)
+  /// This implies that all passes MUST be allocated with 'new'.
+  void add(Pass *P);
+
+  /// run - Execute all of the passes scheduled for execution.  Keep
+  /// track of whether any of the passes modifies the function, and if
+  /// so, return true.
+  ///
+  bool run(Function &F);
+
+  /// doInitialization - Run all of the initializers for the function passes.
+  ///
+  bool doInitialization();
+
+  /// doFinalization - Run all of the finalizers for the function passes.
+  ///
+  bool doFinalization();
+
+  /// getModuleProvider - Return the module provider that this pass manager is
+  /// currently using.  This is the module provider that it uses when a function
+  /// is optimized that is non-resident in the module.
+  ModuleProvider *getModuleProvider() const { return MP; }
+  void setModuleProvider(ModuleProvider *NewMP) { MP = NewMP; }
+
+private:
+  FunctionPassManagerImpl *FPM;
+  ModuleProvider *MP;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/PassManagers.h b/include/llvm/PassManagers.h
new file mode 100644
index 0000000..1aa0d3a
--- /dev/null
+++ b/include/llvm/PassManagers.h
@@ -0,0 +1,457 @@
+//===- llvm/PassManagers.h - Pass Infrastructure classes  -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LLVM Pass Manager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSMANAGERS_H
+#define LLVM_PASSMANAGERS_H
+
+#include "llvm/PassManager.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include <deque>
+#include <map>
+
+//===----------------------------------------------------------------------===//
+// Overview:
+// The Pass Manager Infrastructure manages passes.  Its responsibilities are:
+//
+//   o Manage optimization pass execution order
+//   o Make required Analysis information available before pass P is run
+//   o Release memory occupied by dead passes
+//   o If Analysis information is dirtied by a pass then regenerate Analysis
+//     information before it is consumed by another pass.
+//
+// Pass Manager Infrastructure uses multiple pass managers.  They are
+// PassManager, FunctionPassManager, MPPassManager, FPPassManager, BBPassManager.
+// This class hierarchy uses multiple inheritance, but pass managers do not
+// derive from another pass manager.
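As a hedged sketch of the two public drivers declared in PassManager.h above (assuming a Module &M, a ModuleProvider *MP, and illustrative create*Pass() factory functions that are not part of these headers):

    PassManager PM;
    PM.add(createSomeModulePass());   // PM takes ownership; passes must be new'd
    bool Changed = PM.run(M);         // run everything over the module

    FunctionPassManager FPM(MP);      // does not take ownership of MP
    FPM.add(createSomeFunctionPass());
    FPM.doInitialization();
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
      FPM.run(*F);                    // one function at a time
    FPM.doFinalization();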
+//
+// PassManager and FunctionPassManager are the two top-level pass managers that
+// represent the external interface of this entire pass manager infrastructure.
+//
+// Important classes :
+//
+// [o] class PMTopLevelManager;
+//
+// Two top level managers, PassManager and FunctionPassManager, derive from
+// PMTopLevelManager.  PMTopLevelManager manages information used by top level
+// managers such as last user info.
+//
+// [o] class PMDataManager;
+//
+// PMDataManager manages information, e.g. list of available analysis info,
+// used by a pass manager to manage execution order of passes.  It also provides
+// a place to implement common pass manager APIs.  All pass managers derive from
+// PMDataManager.
+//
+// [o] class BBPassManager : public FunctionPass, public PMDataManager;
+//
+// BBPassManager manages BasicBlockPasses.
+//
+// [o] class FunctionPassManager;
+//
+// This is an external interface used by the JIT to manage FunctionPasses.  This
+// interface relies on FunctionPassManagerImpl to do all the tasks.
+//
+// [o] class FunctionPassManagerImpl : public ModulePass, PMDataManager,
+//                                     public PMTopLevelManager;
+//
+// FunctionPassManagerImpl is a top level manager.  It manages FPPassManagers.
+//
+// [o] class FPPassManager : public ModulePass, public PMDataManager;
+//
+// FPPassManager manages FunctionPasses and BBPassManagers.
+//
+// [o] class MPPassManager : public Pass, public PMDataManager;
+//
+// MPPassManager manages ModulePasses and FPPassManagers.
+//
+// [o] class PassManager;
+//
+// This is an external interface used by various tools to manage passes.  It
+// relies on PassManagerImpl to do all the tasks.
+//
+// [o] class PassManagerImpl : public Pass, public PMDataManager,
+//                             public PMTopLevelManager
+//
+// PassManagerImpl is a top level pass manager responsible for managing
+// MPPassManagers.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/PrettyStackTrace.h"
+
+namespace llvm {
+  class Pass;
+  class Value;
+  class Module;
+
+/// FunctionPassManager and PassManager, two top level managers, serve
+/// as the public interface of pass manager infrastructure.
+enum TopLevelManagerType {
+  TLM_Function,  // FunctionPassManager
+  TLM_Pass       // PassManager
+};
+
+// enums for debugging strings
+enum PassDebuggingString {
+  EXECUTION_MSG,      // "Executing Pass '"
+  MODIFICATION_MSG,   // "' Made Modification '"
+  FREEING_MSG,        // " Freeing Pass '"
+  ON_BASICBLOCK_MSG,  // "' on BasicBlock '" + PassName + "'...\n"
+  ON_FUNCTION_MSG,    // "' on Function '" + FunctionName + "'...\n"
+  ON_MODULE_MSG,      // "' on Module '" + ModuleName + "'...\n"
+  ON_LOOP_MSG,        // "' on Loop ...\n'"
+  ON_CG_MSG           // "' on Call Graph ...\n'"
+};
+
+/// PassManagerPrettyStackEntry - This is used to print informative information
+/// about what pass is running when/if a stack trace is generated.
+class PassManagerPrettyStackEntry : public PrettyStackTraceEntry {
+  Pass *P;
+  Value *V;
+  Module *M;
+public:
+  PassManagerPrettyStackEntry(Pass *p)
+    : P(p), V(0), M(0) {}  // When P is releaseMemory'd.
+  PassManagerPrettyStackEntry(Pass *p, Value &v)
+    : P(p), V(&v), M(0) {} // When P is run on V
+  PassManagerPrettyStackEntry(Pass *p, Module &m)
+    : P(p), V(0), M(&m) {} // When P is run on M
+
+  /// print - Emit information about this stack frame to OS.
+ virtual void print(raw_ostream &OS) const; +}; + + +//===----------------------------------------------------------------------===// +// PMStack +// +/// PMStack +/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers +/// using PMStack. Each Pass implements assignPassManager() to connect itself +/// with appropriate manager. assignPassManager() walks PMStack to find +/// suitable manager. +/// +/// PMStack is just a wrapper around standard deque that overrides pop() and +/// push() methods. +class PMStack { +public: + typedef std::deque<PMDataManager *>::reverse_iterator iterator; + iterator begin() { return S.rbegin(); } + iterator end() { return S.rend(); } + + void handleLastUserOverflow(); + + void pop(); + inline PMDataManager *top() { return S.back(); } + void push(PMDataManager *PM); + inline bool empty() { return S.empty(); } + + void dump(); +private: + std::deque<PMDataManager *> S; +}; + + +//===----------------------------------------------------------------------===// +// PMTopLevelManager +// +/// PMTopLevelManager manages LastUser info and collects common APIs used by +/// top level pass managers. +class PMTopLevelManager { +public: + + virtual unsigned getNumContainedManagers() const { + return (unsigned)PassManagers.size(); + } + + /// Schedule pass P for execution. Make sure that passes required by + /// P are run before P is run. Update analysis info maintained by + /// the manager. Remove dead passes. This is a recursive function. + void schedulePass(Pass *P); + + /// This is implemented by top level pass manager and used by + /// schedulePass() to add analysis info passes that are not available. + virtual void addTopLevelPass(Pass *P) = 0; + + /// Set pass P as the last user of the given analysis passes. + void setLastUser(SmallVector<Pass *, 12> &AnalysisPasses, Pass *P); + + /// Collect passes whose last user is P + void collectLastUses(SmallVector<Pass *, 12> &LastUses, Pass *P); + + /// Find the pass that implements Analysis AID. Search immutable + /// passes and all pass managers. If desired pass is not found + /// then return NULL. + Pass *findAnalysisPass(AnalysisID AID); + + /// Find analysis usage information for the pass P. + AnalysisUsage *findAnalysisUsage(Pass *P); + + explicit PMTopLevelManager(enum TopLevelManagerType t); + virtual ~PMTopLevelManager(); + + /// Add immutable pass and initialize it. + inline void addImmutablePass(ImmutablePass *P) { + P->initializePass(); + ImmutablePasses.push_back(P); + } + + inline SmallVector<ImmutablePass *, 8>& getImmutablePasses() { + return ImmutablePasses; + } + + void addPassManager(PMDataManager *Manager) { + PassManagers.push_back(Manager); + } + + // Add Manager into the list of managers that are not directly + // maintained by this top level pass manager + inline void addIndirectPassManager(PMDataManager *Manager) { + IndirectPassManagers.push_back(Manager); + } + + // Print passes managed by this top level manager. + void dumpPasses() const; + void dumpArguments() const; + + void initializeAllAnalysisInfo(); + + // Active Pass Managers + PMStack activeStack; + +protected: + + /// Collection of pass managers + SmallVector<PMDataManager *, 8> PassManagers; + +private: + + /// Collection of pass managers that are not directly maintained + /// by this pass manager + SmallVector<PMDataManager *, 8> IndirectPassManagers; + + // Map to keep track of last user of the analysis pass. + // LastUser->second is the last user of Lastuser->first. 
+  DenseMap<Pass *, Pass *> LastUser;
+
+  // Map to keep track of passes that are last used by a pass.
+  // This inverse map is initialized at PM->run() based on
+  // the LastUser map.
+  DenseMap<Pass *, SmallPtrSet<Pass *, 8> > InversedLastUser;
+
+  /// Immutable passes are managed by the top level manager.
+  SmallVector<ImmutablePass *, 8> ImmutablePasses;
+
+  DenseMap<Pass *, AnalysisUsage *> AnUsageMap;
+};
+
+
+//===----------------------------------------------------------------------===//
+// PMDataManager
+
+/// PMDataManager provides the common place to manage the analysis data
+/// used by pass managers.
+class PMDataManager {
+public:
+
+  explicit PMDataManager(int Depth) : TPM(NULL), Depth(Depth) {
+    initializeAnalysisInfo();
+  }
+
+  virtual ~PMDataManager();
+
+  /// Augment AvailableAnalysis by adding analysis made available by pass P.
+  void recordAvailableAnalysis(Pass *P);
+
+  /// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
+  void verifyPreservedAnalysis(Pass *P);
+
+  /// verifyDomInfo -- Verify dominator information if it is available.
+  void verifyDomInfo(Pass &P, Function &F);
+
+  /// Remove analyses that are not preserved by the pass.
+  void removeNotPreservedAnalysis(Pass *P);
+
+  /// Remove dead passes.
+  void removeDeadPasses(Pass *P, const char *Msg, enum PassDebuggingString);
+
+  /// Add pass P into the PassVector.  Update
+  /// AvailableAnalysis appropriately if ProcessAnalysis is true.
+  void add(Pass *P, bool ProcessAnalysis = true);
+
+  /// Add RequiredPass into the list of lower level passes required by pass P.
+  /// RequiredPass is run on the fly by the Pass Manager when P requests it
+  /// through the getAnalysis interface.
+  virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
+
+  virtual Pass *getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
+    assert(0 && "Unable to find on the fly pass");
+    return NULL;
+  }
+
+  /// Initialize available analysis information.
+  void initializeAnalysisInfo() {
+    AvailableAnalysis.clear();
+    for (unsigned i = 0; i < PMT_Last; ++i)
+      InheritedAnalysis[i] = NULL;
+  }
+
+  // Return true if P preserves high level analysis used by other
+  // passes that are managed by this manager.
+  bool preserveHigherLevelAnalysis(Pass *P);
+
+  /// Populate RequiredPasses with the analysis passes that are required by
+  /// pass P and are available.  Populate ReqPassNotAvailable with the analysis
+  /// passes that are required by pass P but are not available.
+  void collectRequiredAnalysis(SmallVector<Pass *, 8> &RequiredPasses,
+                               SmallVector<AnalysisID, 8> &ReqPassNotAvailable,
+                               Pass *P);
+
+  /// All Required analyses should be available to the pass as it runs!  Here
+  /// we fill in the AnalysisImpls member of the pass so that it can
+  /// successfully use the getAnalysis() method to retrieve the
+  /// implementations it needs.
+  void initializeAnalysisImpl(Pass *P);
+
+  /// Find the pass that implements Analysis AID.  If the desired pass is not
+  /// found then return NULL.
+  Pass *findAnalysisPass(AnalysisID AID, bool Direction);
+
+  // Access the top level manager.
+  PMTopLevelManager *getTopLevelManager() { return TPM; }
+  void setTopLevelManager(PMTopLevelManager *T) { TPM = T; }
+
+  unsigned getDepth() const { return Depth; }
+
+  // Print routines used by debug-pass
+  void dumpLastUses(Pass *P, unsigned Offset) const;
+  void dumpPassArguments() const;
+  void dumpPassInfo(Pass *P, enum PassDebuggingString S1,
+                    enum PassDebuggingString S2, const char *Msg);
+  void dumpRequiredSet(const Pass *P) const;
+  void dumpPreservedSet(const Pass *P) const;
+
+  virtual unsigned getNumContainedPasses() const {
+    return (unsigned)PassVector.size();
+  }
+
+  virtual PassManagerType getPassManagerType() const {
+    assert(0 && "Invalid use of getPassManagerType");
+    return PMT_Unknown;
+  }
+
+  std::map<AnalysisID, Pass*> *getAvailableAnalysis() {
+    return &AvailableAnalysis;
+  }
+
+  // Collect AvailableAnalysis from all the active Pass Managers.
+  void populateInheritedAnalysis(PMStack &PMS) {
+    unsigned Index = 0;
+    for (PMStack::iterator I = PMS.begin(), E = PMS.end();
+         I != E; ++I)
+      InheritedAnalysis[Index++] = (*I)->getAvailableAnalysis();
+  }
+
+protected:
+
+  // Top level manager.
+  PMTopLevelManager *TPM;
+
+  // Collection of passes that are managed by this manager.
+  SmallVector<Pass *, 16> PassVector;
+
+  // Collection of Analysis provided by the parent pass manager and
+  // used by the current pass manager.  At any time there cannot be more
+  // than PMT_Last active pass managers.
+  std::map<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];
+
+private:
+  void dumpAnalysisUsage(const char *Msg, const Pass *P,
+                         const AnalysisUsage::VectorType &Set) const;
+
+  // Set of available Analysis.  This information is used while scheduling
+  // passes.  If a pass requires an analysis which is not available, then the
+  // required analysis pass is scheduled to run before the pass itself is
+  // scheduled to run.
+  std::map<AnalysisID, Pass*> AvailableAnalysis;
+
+  // Collection of higher level analyses used by the passes managed by
+  // this manager.
+  SmallVector<Pass *, 8> HigherLevelAnalysis;
+
+  unsigned Depth;
+};
+
+//===----------------------------------------------------------------------===//
+// FPPassManager
+//
+/// FPPassManager manages BBPassManagers and FunctionPasses.
+/// It batches all function passes and basic block pass managers together and
+/// sequences them to process one function at a time before processing the next
+/// function.
+
+class FPPassManager : public ModulePass, public PMDataManager {
+
+public:
+  static char ID;
+  explicit FPPassManager(int Depth)
+  : ModulePass(&ID), PMDataManager(Depth) { }
+
+  /// run - Execute all of the passes scheduled for execution.  Keep track of
+  /// whether any of the passes modifies the module, and if so, return true.
+  bool runOnFunction(Function &F);
+  bool runOnModule(Module &M);
+
+  /// cleanup - After running all passes, clean up the pass manager cache.
+  void cleanup();
+
+  /// doInitialization - Run all of the initializers for the function passes.
+  ///
+  bool doInitialization(Module &M);
+
+  /// doFinalization - Run all of the finalizers for the function passes.
+  ///
+  bool doFinalization(Module &M);
+
+  /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const { + Info.setPreservesAll(); + } + + // Print passes managed by this manager + void dumpPassStructure(unsigned Offset); + + virtual const char *getPassName() const { + return "Function Pass Manager"; + } + + FunctionPass *getContainedPass(unsigned N) { + assert ( N < PassVector.size() && "Pass number out of range!"); + FunctionPass *FP = static_cast<FunctionPass *>(PassVector[N]); + return FP; + } + + virtual PassManagerType getPassManagerType() const { + return PMT_FunctionPassManager; + } +}; + +} + +extern void StartPassTimer(llvm::Pass *); +extern void StopPassTimer(llvm::Pass *); + +#endif diff --git a/include/llvm/PassSupport.h b/include/llvm/PassSupport.h new file mode 100644 index 0000000..fe3ca52 --- /dev/null +++ b/include/llvm/PassSupport.h @@ -0,0 +1,256 @@ +//===- llvm/PassSupport.h - Pass Support code -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines stuff that is used to define and "use" Passes. This file +// is automatically #included by Pass.h, so: +// +// NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY +// +// Instead, #include Pass.h. +// +// This file defines Pass registration code and classes used for it. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_PASS_SUPPORT_H +#define LLVM_PASS_SUPPORT_H + +// No need to include Pass.h, we are being included by it! + +namespace llvm { + +class TargetMachine; + +//===--------------------------------------------------------------------------- +/// PassInfo class - An instance of this class exists for every pass known by +/// the system, and can be obtained from a live Pass by calling its +/// getPassInfo() method. These objects are set up by the RegisterPass<> +/// template, defined below. +/// +class PassInfo { +public: + typedef Pass* (*NormalCtor_t)(); + +private: + const char *const PassName; // Nice name for Pass + const char *const PassArgument; // Command Line argument to run this pass + const intptr_t PassID; + const bool IsCFGOnlyPass; // Pass only looks at the CFG. + const bool IsAnalysis; // True if an analysis pass. + const bool IsAnalysisGroup; // True if an analysis group. + std::vector<const PassInfo*> ItfImpl;// Interfaces implemented by this pass + + NormalCtor_t NormalCtor; + +public: + /// PassInfo ctor - Do not call this directly, this should only be invoked + /// through RegisterPass. + PassInfo(const char *name, const char *arg, intptr_t pi, + NormalCtor_t normal = 0, + bool isCFGOnly = false, bool is_analysis = false) + : PassName(name), PassArgument(arg), PassID(pi), + IsCFGOnlyPass(isCFGOnly), + IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) { + registerPass(); + } + /// PassInfo ctor - Do not call this directly, this should only be invoked + /// through RegisterPass. This version is for use by analysis groups; it + /// does not auto-register the pass. 
+ PassInfo(const char *name, intptr_t pi) + : PassName(name), PassArgument(""), PassID(pi), + IsCFGOnlyPass(false), + IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(0) { + } + + /// getPassName - Return the friendly name for the pass, never returns null + /// + const char *getPassName() const { return PassName; } + + /// getPassArgument - Return the command line option that may be passed to + /// 'opt' that will cause this pass to be run. This will return null if there + /// is no argument. + /// + const char *getPassArgument() const { return PassArgument; } + + /// getTypeInfo - Return the id object for the pass... + /// TODO : Rename + intptr_t getTypeInfo() const { return PassID; } + + /// isAnalysisGroup - Return true if this is an analysis group, not a normal + /// pass. + /// + bool isAnalysisGroup() const { return IsAnalysisGroup; } + bool isAnalysis() const { return IsAnalysis; } + + /// isCFGOnlyPass - return true if this pass only looks at the CFG for the + /// function. + bool isCFGOnlyPass() const { return IsCFGOnlyPass; } + + /// getNormalCtor - Return a pointer to a function, that when called, creates + /// an instance of the pass and returns it. This pointer may be null if there + /// is no default constructor for the pass. + /// + NormalCtor_t getNormalCtor() const { + return NormalCtor; + } + void setNormalCtor(NormalCtor_t Ctor) { + NormalCtor = Ctor; + } + + /// createPass() - Use this method to create an instance of this pass. + Pass *createPass() const { + assert((!isAnalysisGroup() || NormalCtor) && + "No default implementation found for analysis group!"); + assert(NormalCtor && + "Cannot call createPass on PassInfo without default ctor!"); + return NormalCtor(); + } + + /// addInterfaceImplemented - This method is called when this pass is + /// registered as a member of an analysis group with the RegisterAnalysisGroup + /// template. + /// + void addInterfaceImplemented(const PassInfo *ItfPI) { + ItfImpl.push_back(ItfPI); + } + + /// getInterfacesImplemented - Return a list of all of the analysis group + /// interfaces implemented by this pass. + /// + const std::vector<const PassInfo*> &getInterfacesImplemented() const { + return ItfImpl; + } + +protected: + void registerPass(); + void unregisterPass(); + +private: + void operator=(const PassInfo &); // do not implement + PassInfo(const PassInfo &); // do not implement +}; + + +template<typename PassName> +Pass *callDefaultCtor() { return new PassName(); } + +//===--------------------------------------------------------------------------- +/// RegisterPass<t> template - This template class is used to notify the system +/// that a Pass is available for use, and registers it into the internal +/// database maintained by the PassManager. Unless this template is used, opt, +/// for example will not be able to see the pass and attempts to create the pass +/// will fail. This template is used in the follow manner (at global scope, in +/// your .cpp file): +/// +/// static RegisterPass<YourPassClassName> tmp("passopt", "My Pass Name"); +/// +/// This statement will cause your pass to be created by calling the default +/// constructor exposed by the pass. 
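+/// A hedged sketch of the pieces this template expects from the pass class
+/// (the class name, ID definition and option string below are illustrative,
+/// not mandated by this header):
+///
+///   struct MyHello : public FunctionPass {
+///     static char ID;                      // RegisterPass takes &MyHello::ID
+///     MyHello() : FunctionPass(&ID) {}
+///     virtual bool runOnFunction(Function &F) { return false; }
+///   };
+///   char MyHello::ID = 0;
+///   static RegisterPass<MyHello> X("myhello", "Hello World Pass");
+///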
If you have a different constructor that +/// must be called, create a global constructor function (which takes the +/// arguments you need and returns a Pass*) and register your pass like this: +/// +/// static RegisterPass<PassClassName> tmp("passopt", "My Name"); +/// +template<typename passName> +struct RegisterPass : public PassInfo { + + // Register Pass using default constructor... + RegisterPass(const char *PassArg, const char *Name, bool CFGOnly = false, + bool is_analysis = false) + : PassInfo(Name, PassArg, intptr_t(&passName::ID), + PassInfo::NormalCtor_t(callDefaultCtor<passName>), + CFGOnly, is_analysis) { + } +}; + + +/// RegisterAnalysisGroup - Register a Pass as a member of an analysis _group_. +/// Analysis groups are used to define an interface (which need not derive from +/// Pass) that is required by passes to do their job. Analysis Groups differ +/// from normal analyses because any available implementation of the group will +/// be used if it is available. +/// +/// If no analysis implementing the interface is available, a default +/// implementation is created and added. A pass registers itself as the default +/// implementation by specifying 'true' as the second template argument of this +/// class. +/// +/// In addition to registering itself as an analysis group member, a pass must +/// register itself normally as well. Passes may be members of multiple groups +/// and may still be "required" specifically by name. +/// +/// The actual interface may also be registered as well (by not specifying the +/// second template argument). The interface should be registered to associate +/// a nice name with the interface. +/// +class RegisterAGBase : public PassInfo { + PassInfo *InterfaceInfo; + const PassInfo *ImplementationInfo; + bool isDefaultImplementation; +protected: + explicit RegisterAGBase(const char *Name, + intptr_t InterfaceID, + intptr_t PassID = 0, + bool isDefault = false); +}; + +template<typename Interface, bool Default = false> +struct RegisterAnalysisGroup : public RegisterAGBase { + explicit RegisterAnalysisGroup(PassInfo &RPB) + : RegisterAGBase(RPB.getPassName(), + intptr_t(&Interface::ID), RPB.getTypeInfo(), + Default) { + } + + explicit RegisterAnalysisGroup(const char *Name) + : RegisterAGBase(Name, intptr_t(&Interface::ID)) { + } +}; + + + +//===--------------------------------------------------------------------------- +/// PassRegistrationListener class - This class is meant to be derived from by +/// clients that are interested in which passes get registered and unregistered +/// at runtime (which can be because of the RegisterPass constructors being run +/// as the program starts up, or may be because a shared object just got +/// loaded). Deriving from the PassRegistationListener class automatically +/// registers your object to receive callbacks indicating when passes are loaded +/// and removed. +/// +struct PassRegistrationListener { + + /// PassRegistrationListener ctor - Add the current object to the list of + /// PassRegistrationListeners... + PassRegistrationListener(); + + /// dtor - Remove object from list of listeners... + /// + virtual ~PassRegistrationListener(); + + /// Callback functions - These functions are invoked whenever a pass is loaded + /// or removed from the current executable. + /// + virtual void passRegistered(const PassInfo *) {} + + /// enumeratePasses - Iterate over the registered passes, calling the + /// passEnumerate callback on each PassInfo object. 
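+  /// A hypothetical listener that prints every registered pass might look
+  /// like this (PrintPasses is illustrative, not part of this header):
+  ///
+  ///   struct PrintPasses : public PassRegistrationListener {
+  ///     virtual void passEnumerate(const PassInfo *PI) {
+  ///       cerr << PI->getPassName() << "\n";
+  ///     }
+  ///   };
+  ///   PrintPasses().enumeratePasses();  // passEnumerate runs once per pass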
+ /// + void enumeratePasses(); + + /// passEnumerate - Callback function invoked when someone calls + /// enumeratePasses on this PassRegistrationListener object. + /// + virtual void passEnumerate(const PassInfo *) {} +}; + + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/AIXDataTypesFix.h b/include/llvm/Support/AIXDataTypesFix.h new file mode 100644 index 0000000..a9a9147 --- /dev/null +++ b/include/llvm/Support/AIXDataTypesFix.h @@ -0,0 +1,25 @@ +//===-- llvm/Support/AIXDataTypesFix.h - Fix datatype defs ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file overrides default system-defined types and limits which cannot be +// done in DataTypes.h.in because it is processed by autoheader first, which +// comments out any #undef statement +// +//===----------------------------------------------------------------------===// + +// No include guards desired! + +#ifndef SUPPORT_DATATYPES_H +#error "AIXDataTypesFix.h must only be included via DataTypes.h!" +#endif + +// GCC is strict about defining large constants: they must have LL modifier. +// These will be defined properly at the end of DataTypes.h +#undef INT64_MAX +#undef INT64_MIN diff --git a/include/llvm/Support/AlignOf.h b/include/llvm/Support/AlignOf.h new file mode 100644 index 0000000..6a7a1a6 --- /dev/null +++ b/include/llvm/Support/AlignOf.h @@ -0,0 +1,60 @@ +//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the AlignOf function that computes alignments for +// arbitrary types. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_ALIGNOF_H +#define LLVM_SUPPORT_ALIGNOF_H + +namespace llvm { + +template <typename T> +struct AlignmentCalcImpl { + char x; + T t; +private: + AlignmentCalcImpl() {} // Never instantiate. +}; + +/// AlignOf - A templated class that contains an enum value representing +/// the alignment of the template argument. For example, +/// AlignOf<int>::Alignment represents the alignment of type "int". The +/// alignment calculated is the minimum alignment, and not necessarily +/// the "desired" alignment returned by GCC's __alignof__ (for example). Note +/// that because the alignment is an enum value, it can be used as a +/// compile-time constant (e.g., for template instantiation). +template <typename T> +struct AlignOf { + enum { Alignment = + static_cast<unsigned int>(sizeof(AlignmentCalcImpl<T>) - sizeof(T)) }; + + enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 }; + enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 }; + enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 }; + enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 }; + + enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 1 : 0 }; + enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 }; + enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 }; + enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 
1 : 0 }; + +}; + +/// alignof - A templated function that returns the mininum alignment of +/// of a type. This provides no extra functionality beyond the AlignOf +/// class besides some cosmetic cleanliness. Example usage: +/// alignof<int>() returns the alignment of an int. +template <typename T> +static inline unsigned alignof() { return AlignOf<T>::Alignment; } + +} // end namespace llvm +#endif diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h new file mode 100644 index 0000000..c0414f9 --- /dev/null +++ b/include/llvm/Support/Allocator.h @@ -0,0 +1,91 @@ +//===--- Allocator.h - Simple memory allocation abstraction -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MallocAllocator and BumpPtrAllocator interfaces. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_ALLOCATOR_H +#define LLVM_SUPPORT_ALLOCATOR_H + +#include "llvm/Support/AlignOf.h" +#include <cstdlib> + +namespace llvm { + +class MallocAllocator { +public: + MallocAllocator() {} + ~MallocAllocator() {} + + void Reset() {} + + void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); } + + template <typename T> + T *Allocate() { return static_cast<T*>(malloc(sizeof(T))); } + + template <typename T> + T *Allocate(size_t Num) { + return static_cast<T*>(malloc(sizeof(T)*Num)); + } + + void Deallocate(const void *Ptr) { free(const_cast<void*>(Ptr)); } + + void PrintStats() const {} +}; + +/// BumpPtrAllocator - This allocator is useful for containers that need very +/// simple memory allocation strategies. In particular, this just keeps +/// allocating memory, and never deletes it until the entire block is dead. This +/// makes allocation speedy, but must only be used when the trade-off is ok. +class BumpPtrAllocator { + BumpPtrAllocator(const BumpPtrAllocator &); // do not implement + void operator=(const BumpPtrAllocator &); // do not implement + + void *TheMemory; +public: + BumpPtrAllocator(); + ~BumpPtrAllocator(); + + void Reset(); + + void *Allocate(size_t Size, size_t Alignment); + + /// Allocate space, but do not construct, one object. + /// + template <typename T> + T *Allocate() { + return static_cast<T*>(Allocate(sizeof(T),AlignOf<T>::Alignment)); + } + + /// Allocate space for an array of objects. This does not construct the + /// objects though. + template <typename T> + T *Allocate(size_t Num) { + return static_cast<T*>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment)); + } + + /// Allocate space for a specific count of elements and with a specified + /// alignment. + template <typename T> + T *Allocate(size_t Num, size_t Alignment) { + // Round EltSize up to the specified alignment. 
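+    // For a power-of-two Alignment, adding (Alignment - 1) and masking with
+    // -Alignment (i.e. ~(Alignment - 1)) rounds sizeof(T) up to the next
+    // multiple of Alignment; e.g. sizeof(T) == 12 with Alignment == 8 gives
+    // (12 + 7) & ~7 == 16.  A non-power-of-two Alignment is not handled.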
+ size_t EltSize = (sizeof(T)+Alignment-1)&(-Alignment); + return static_cast<T*>(Allocate(Num * EltSize, Alignment)); + } + + void Deallocate(const void * /*Ptr*/) {} + + void PrintStats() const; +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/Annotation.h b/include/llvm/Support/Annotation.h new file mode 100644 index 0000000..ffc9296 --- /dev/null +++ b/include/llvm/Support/Annotation.h @@ -0,0 +1,216 @@ +//===-- llvm/Support/Annotation.h - Annotation classes ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations for two classes: Annotation & Annotable. +// Using these two simple classes, anything that derives from Annotable can have +// Annotation subclasses attached to them, ready for easy retrieval. +// +// Annotations are designed to be easily attachable to various classes. +// +// The AnnotationManager class is essential for using these classes. It is +// responsible for turning Annotation name strings into tokens [unique id #'s] +// that may be used to search for and create annotations. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_ANNOTATION_H +#define LLVM_SUPPORT_ANNOTATION_H + +#include <cassert> + +namespace llvm { + +class AnnotationID; +class Annotation; +class Annotable; +struct AnnotationManager; + +//===----------------------------------------------------------------------===// +// +// AnnotationID - This class is a thin wrapper around an unsigned integer that +// is used to hopefully prevent errors using AnnotationID's. They may be copied +// freely around and passed byvalue with little or no overhead. +// +class AnnotationID { + friend struct AnnotationManager; + unsigned ID; + + AnnotationID(); // Default ctor is disabled + + // AnnotationID is only creatable from AnnMgr. + explicit inline AnnotationID(unsigned i) : ID(i) {} +public: + inline AnnotationID(const AnnotationID &A) : ID(A.ID) {} + + inline bool operator==(const AnnotationID &A) const { + return A.ID == ID; + } + inline bool operator<(const AnnotationID &A) const { + return ID < A.ID; + } +}; + + +//===----------------------------------------------------------------------===// +// +// Annotation Class - This class serves as a base class for any specific +// annotations that you might need. Simply subclass this to add extra +// information to the annotations. +// +class Annotation { + friend class Annotable; // Annotable manipulates Next list + AnnotationID ID; // ID number, as obtained from AnnotationManager + Annotation *Next; // The next annotation in the linked list +public: + explicit inline Annotation(AnnotationID id) : ID(id), Next(0) {} + virtual ~Annotation(); // Designed to be subclassed + + // getID - Return the unique ID# of this annotation + inline AnnotationID getID() const { return ID; } + + // getNext - Return the next annotation in the list... + inline Annotation *getNext() const { return Next; } +}; + + +//===----------------------------------------------------------------------===// +// +// Annotable - This class is used as a base class for all objects that would +// like to have annotation capability. +// +// Annotable objects keep their annotation list sorted as annotations are +// inserted and deleted. 
This is used to ensure that annotations with identical +// ID#'s are stored sequentially. +// +class Annotable { + mutable Annotation *AnnotationList; + + Annotable(const Annotable &); // Do not implement + void operator=(const Annotable &); // Do not implement +public: + Annotable() : AnnotationList(0) {} + ~Annotable(); + + // getAnnotation - Search the list for annotations of the specified ID. The + // pointer returned is either null (if no annotations of the specified ID + // exist), or it points to the first element of a potentially list of elements + // with identical ID #'s. + // + Annotation *getAnnotation(AnnotationID ID) const { + for (Annotation *A = AnnotationList; A; A = A->getNext()) + if (A->getID() == ID) return A; + return 0; + } + + // getOrCreateAnnotation - Search through the annotation list, if there is + // no annotation with the specified ID, then use the AnnotationManager to + // create one. + // + inline Annotation *getOrCreateAnnotation(AnnotationID ID) const; + + // addAnnotation - Insert the annotation into the list in a sorted location. + // + void addAnnotation(Annotation *A) const { + assert(A->Next == 0 && "Annotation already in list?!?"); + + Annotation **AL = &AnnotationList; + while (*AL && (*AL)->ID < A->getID()) // Find where to insert annotation + AL = &((*AL)->Next); + A->Next = *AL; // Link the annotation in + *AL = A; + } + + // unlinkAnnotation - Remove the first annotation of the specified ID... and + // then return the unlinked annotation. The annotation object is not deleted. + // + inline Annotation *unlinkAnnotation(AnnotationID ID) const { + for (Annotation **A = &AnnotationList; *A; A = &((*A)->Next)) + if ((*A)->getID() == ID) { + Annotation *Ret = *A; + *A = Ret->Next; + Ret->Next = 0; + return Ret; + } + return 0; + } + + // deleteAnnotation - Delete the first annotation of the specified ID in the + // list. Unlink unlinkAnnotation, this actually deletes the annotation object + // + bool deleteAnnotation(AnnotationID ID) const { + Annotation *A = unlinkAnnotation(ID); + delete A; + return A != 0; + } +}; + + +//===----------------------------------------------------------------------===// +// +// AnnotationManager - This class is primarily responsible for maintaining a +// one-to-one mapping between string Annotation names and Annotation ID numbers. +// +// Compared to the rest of the Annotation system, these mapping methods are +// relatively slow, so they should be avoided by locally caching Annotation +// ID #'s. These methods are safe to call at any time, even by static ctors, so +// they should be used by static ctors most of the time. +// +// This class also provides support for annotations that are created on demand +// by the Annotable::getOrCreateAnnotation method. To get this to work, simply +// register an annotation handler +// +struct AnnotationManager { + typedef Annotation *(*Factory)(AnnotationID, const Annotable *, void*); + + //===--------------------------------------------------------------------===// + // Basic ID <-> Name map functionality + + static AnnotationID getID(const char *Name); // Name -> ID + static const char *getName(AnnotationID ID); // ID -> Name + + // getID - Name -> ID + registration of a factory function for demand driven + // annotation support. + static AnnotationID getID(const char *Name, Factory Fact, void *Data = 0); + + //===--------------------------------------------------------------------===// + // Annotation creation on demand support... 
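+
+  // A hedged sketch of how the demand-driven path is typically wired up (the
+  // MyAnn type and CreateMyAnn factory below are illustrative only):
+  //
+  //   static Annotation *CreateMyAnn(AnnotationID ID, const Annotable *, void *) {
+  //     return new MyAnn(ID);            // MyAnn would derive from Annotation
+  //   }
+  //   AnnotationID MyID = AnnotationManager::getID("myann", CreateMyAnn);
+  //   // ...later, Obj->getOrCreateAnnotation(MyID) invokes CreateMyAnn on demand.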
+ + // registerAnnotationFactory - This method is used to register a callback + // function used to create an annotation on demand if it is needed by the + // Annotable::getOrCreateAnnotation method. + // + static void registerAnnotationFactory(AnnotationID ID, Factory Func, + void *ExtraData = 0); + + // createAnnotation - Create an annotation of the specified ID for the + // specified object, using a register annotation creation function. + // + static Annotation *createAnnotation(AnnotationID ID, const Annotable *Obj); +}; + + + +// getOrCreateAnnotation - Search through the annotation list, if there is +// no annotation with the specified ID, then use the AnnotationManager to +// create one. +// +inline Annotation *Annotable::getOrCreateAnnotation(AnnotationID ID) const { + Annotation *A = getAnnotation(ID); // Fast path, check for preexisting ann + if (A) return A; + + // No annotation found, ask the annotation manager to create an annotation... + A = AnnotationManager::createAnnotation(ID, this); + assert(A && "AnnotationManager could not create annotation!"); + addAnnotation(A); + return A; +} + +} // End namespace llvm + +#endif diff --git a/include/llvm/Support/CFG.h b/include/llvm/Support/CFG.h new file mode 100644 index 0000000..b0b857b --- /dev/null +++ b/include/llvm/Support/CFG.h @@ -0,0 +1,265 @@ +//===-- llvm/Support/CFG.h - Process LLVM structures as graphs --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines specializations of GraphTraits that allow Function and +// BasicBlock graphs to be treated as proper graphs for generic algorithms. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_CFG_H +#define LLVM_SUPPORT_CFG_H + +#include "llvm/ADT/GraphTraits.h" +#include "llvm/Function.h" +#include "llvm/InstrTypes.h" +#include "llvm/ADT/iterator.h" + +namespace llvm { + +//===--------------------------------------------------------------------===// +// BasicBlock pred_iterator definition +//===--------------------------------------------------------------------===// + +template <class _Ptr, class _USE_iterator> // Predecessor Iterator +class PredIterator : public forward_iterator<_Ptr, ptrdiff_t> { + typedef forward_iterator<_Ptr, ptrdiff_t> super; + _USE_iterator It; +public: + typedef PredIterator<_Ptr,_USE_iterator> _Self; + typedef typename super::pointer pointer; + + inline void advancePastNonTerminators() { + // Loop to ignore non terminator uses (for example PHI nodes)... 
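+    // A BasicBlock's use list holds every Value that refers to the block: the
+    // terminators of its predecessors, but also PHI node operands and similar
+    // non-terminator users.  Only terminator uses correspond to CFG edges, so
+    // anything else is stepped over here.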
+ while (!It.atEnd() && !isa<TerminatorInst>(*It)) + ++It; + } + + inline PredIterator(_Ptr *bb) : It(bb->use_begin()) { + advancePastNonTerminators(); + } + inline PredIterator(_Ptr *bb, bool) : It(bb->use_end()) {} + + inline bool operator==(const _Self& x) const { return It == x.It; } + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline pointer operator*() const { + assert(!It.atEnd() && "pred_iterator out of range!"); + return cast<TerminatorInst>(*It)->getParent(); + } + inline pointer *operator->() const { return &(operator*()); } + + inline _Self& operator++() { // Preincrement + assert(!It.atEnd() && "pred_iterator out of range!"); + ++It; advancePastNonTerminators(); + return *this; + } + + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } +}; + +typedef PredIterator<BasicBlock, Value::use_iterator> pred_iterator; +typedef PredIterator<const BasicBlock, + Value::use_const_iterator> pred_const_iterator; + +inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); } +inline pred_const_iterator pred_begin(const BasicBlock *BB) { + return pred_const_iterator(BB); +} +inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);} +inline pred_const_iterator pred_end(const BasicBlock *BB) { + return pred_const_iterator(BB, true); +} + + + +//===--------------------------------------------------------------------===// +// BasicBlock succ_iterator definition +//===--------------------------------------------------------------------===// + +template <class Term_, class BB_> // Successor Iterator +class SuccIterator : public bidirectional_iterator<BB_, ptrdiff_t> { + const Term_ Term; + unsigned idx; + typedef bidirectional_iterator<BB_, ptrdiff_t> super; +public: + typedef SuccIterator<Term_, BB_> _Self; + typedef typename super::pointer pointer; + // TODO: This can be random access iterator, need operator+ and stuff tho + + inline SuccIterator(Term_ T) : Term(T), idx(0) { // begin iterator + assert(T && "getTerminator returned null!"); + } + inline SuccIterator(Term_ T, bool) // end iterator + : Term(T), idx(Term->getNumSuccessors()) { + assert(T && "getTerminator returned null!"); + } + + inline const _Self &operator=(const _Self &I) { + assert(Term == I.Term &&"Cannot assign iterators to two different blocks!"); + idx = I.idx; + return *this; + } + + /// getSuccessorIndex - This is used to interface between code that wants to + /// operate on terminator instructions directly. 
+ unsigned getSuccessorIndex() const { return idx; } + + inline bool operator==(const _Self& x) const { return idx == x.idx; } + inline bool operator!=(const _Self& x) const { return !operator==(x); } + + inline pointer operator*() const { return Term->getSuccessor(idx); } + inline pointer operator->() const { return operator*(); } + + inline _Self& operator++() { ++idx; return *this; } // Preincrement + inline _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } + + inline _Self& operator--() { --idx; return *this; } // Predecrement + inline _Self operator--(int) { // Postdecrement + _Self tmp = *this; --*this; return tmp; + } +}; + +typedef SuccIterator<TerminatorInst*, BasicBlock> succ_iterator; +typedef SuccIterator<const TerminatorInst*, + const BasicBlock> succ_const_iterator; + +inline succ_iterator succ_begin(BasicBlock *BB) { + return succ_iterator(BB->getTerminator()); +} +inline succ_const_iterator succ_begin(const BasicBlock *BB) { + return succ_const_iterator(BB->getTerminator()); +} +inline succ_iterator succ_end(BasicBlock *BB) { + return succ_iterator(BB->getTerminator(), true); +} +inline succ_const_iterator succ_end(const BasicBlock *BB) { + return succ_const_iterator(BB->getTerminator(), true); +} + + + +//===--------------------------------------------------------------------===// +// GraphTraits specializations for basic block graphs (CFGs) +//===--------------------------------------------------------------------===// + +// Provide specializations of GraphTraits to be able to treat a function as a +// graph of basic blocks... + +template <> struct GraphTraits<BasicBlock*> { + typedef BasicBlock NodeType; + typedef succ_iterator ChildIteratorType; + + static NodeType *getEntryNode(BasicBlock *BB) { return BB; } + static inline ChildIteratorType child_begin(NodeType *N) { + return succ_begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return succ_end(N); + } +}; + +template <> struct GraphTraits<const BasicBlock*> { + typedef const BasicBlock NodeType; + typedef succ_const_iterator ChildIteratorType; + + static NodeType *getEntryNode(const BasicBlock *BB) { return BB; } + + static inline ChildIteratorType child_begin(NodeType *N) { + return succ_begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return succ_end(N); + } +}; + +// Provide specializations of GraphTraits to be able to treat a function as a +// graph of basic blocks... and to walk it in inverse order. Inverse order for +// a function is considered to be when traversing the predecessor edges of a BB +// instead of the successor edges. 
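+// For example, a walk over a block and its transitive predecessors can be
+// written with the generic depth-first iterators (a sketch; assumes
+// llvm/ADT/DepthFirstIterator.h, which is not part of this header):
+//
+//   for (idf_iterator<BasicBlock*> I = idf_begin(BB), E = idf_end(BB);
+//        I != E; ++I)
+//     visit(*I);   // 'visit' is a hypothetical callback taking a BasicBlock*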
+// +template <> struct GraphTraits<Inverse<BasicBlock*> > { + typedef BasicBlock NodeType; + typedef pred_iterator ChildIteratorType; + static NodeType *getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; } + static inline ChildIteratorType child_begin(NodeType *N) { + return pred_begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return pred_end(N); + } +}; + +template <> struct GraphTraits<Inverse<const BasicBlock*> > { + typedef const BasicBlock NodeType; + typedef pred_const_iterator ChildIteratorType; + static NodeType *getEntryNode(Inverse<const BasicBlock*> G) { + return G.Graph; + } + static inline ChildIteratorType child_begin(NodeType *N) { + return pred_begin(N); + } + static inline ChildIteratorType child_end(NodeType *N) { + return pred_end(N); + } +}; + + + +//===--------------------------------------------------------------------===// +// GraphTraits specializations for function basic block graphs (CFGs) +//===--------------------------------------------------------------------===// + +// Provide specializations of GraphTraits to be able to treat a function as a +// graph of basic blocks... these are the same as the basic block iterators, +// except that the root node is implicitly the first node of the function. +// +template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> { + static NodeType *getEntryNode(Function *F) { return &F->getEntryBlock(); } + + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + typedef Function::iterator nodes_iterator; + static nodes_iterator nodes_begin(Function *F) { return F->begin(); } + static nodes_iterator nodes_end (Function *F) { return F->end(); } +}; +template <> struct GraphTraits<const Function*> : + public GraphTraits<const BasicBlock*> { + static NodeType *getEntryNode(const Function *F) {return &F->getEntryBlock();} + + // nodes_iterator/begin/end - Allow iteration over all nodes in the graph + typedef Function::const_iterator nodes_iterator; + static nodes_iterator nodes_begin(const Function *F) { return F->begin(); } + static nodes_iterator nodes_end (const Function *F) { return F->end(); } +}; + + +// Provide specializations of GraphTraits to be able to treat a function as a +// graph of basic blocks... and to walk it in inverse order. Inverse order for +// a function is considered to be when traversing the predecessor edges of a BB +// instead of the successor edges. +// +template <> struct GraphTraits<Inverse<Function*> > : + public GraphTraits<Inverse<BasicBlock*> > { + static NodeType *getEntryNode(Inverse<Function*> G) { + return &G.Graph->getEntryBlock(); + } +}; +template <> struct GraphTraits<Inverse<const Function*> > : + public GraphTraits<Inverse<const BasicBlock*> > { + static NodeType *getEntryNode(Inverse<const Function *> G) { + return &G.Graph->getEntryBlock(); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/CallSite.h b/include/llvm/Support/CallSite.h new file mode 100644 index 0000000..dc41590 --- /dev/null +++ b/include/llvm/Support/CallSite.h @@ -0,0 +1,199 @@ +//===-- llvm/Support/CallSite.h - Abstract Call & Invoke instrs -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines the CallSite class, which is a handy wrapper for code that +// wants to treat Call and Invoke instructions in a generic way. +// +// NOTE: This class is supposed to have "value semantics". So it should be +// passed by value, not by reference; it should not be "new"ed or "delete"d. It +// is efficiently copyable, assignable and constructable, with cost equivalent +// to copying a pointer (notice that it has only a single data member). +// The internal representation carries a flag which indicates which of the two +// variants is enclosed. This allows for cheaper checks when various accessors +// of CallSite are employed. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_CALLSITE_H +#define LLVM_SUPPORT_CALLSITE_H + +#include "llvm/Attributes.h" +#include "llvm/ADT/PointerIntPair.h" +#include "llvm/BasicBlock.h" +#include "llvm/Instruction.h" + +namespace llvm { + +class CallInst; +class InvokeInst; + +class CallSite { + PointerIntPair<Instruction*, 1, bool> I; +public: + CallSite() : I(0, false) {} + CallSite(CallInst *CI) : I(reinterpret_cast<Instruction*>(CI), true) {} + CallSite(InvokeInst *II) : I(reinterpret_cast<Instruction*>(II), false) {} + CallSite(Instruction *C); + CallSite(const CallSite &CS) : I(CS.I) {} + CallSite &operator=(const CallSite &CS) { I = CS.I; return *this; } + + bool operator==(const CallSite &CS) const { return I == CS.I; } + bool operator!=(const CallSite &CS) const { return I != CS.I; } + + /// CallSite::get - This static method is sort of like a constructor. It will + /// create an appropriate call site for a Call or Invoke instruction, but it + /// can also create a null initialized CallSite object for something which is + /// NOT a call site. + /// + static CallSite get(Value *V) { + if (Instruction *I = dyn_cast<Instruction>(V)) { + if (I->getOpcode() == Instruction::Call) + return CallSite(reinterpret_cast<CallInst*>(I)); + else if (I->getOpcode() == Instruction::Invoke) + return CallSite(reinterpret_cast<InvokeInst*>(I)); + } + return CallSite(); + } + + /// getCallingConv/setCallingConv - get or set the calling convention of the + /// call. + unsigned getCallingConv() const; + void setCallingConv(unsigned CC); + + /// getAttributes/setAttributes - get or set the parameter attributes of + /// the call. + const AttrListPtr &getAttributes() const; + void setAttributes(const AttrListPtr &PAL); + + /// paramHasAttr - whether the call or the callee has the given attribute. + bool paramHasAttr(uint16_t i, Attributes attr) const; + + /// @brief Extract the alignment for a call or parameter (0=unknown). + uint16_t getParamAlignment(uint16_t i) const; + + /// @brief Determine if the call does not access memory. + bool doesNotAccessMemory() const; + void setDoesNotAccessMemory(bool doesNotAccessMemory = true); + + /// @brief Determine if the call does not access or only reads memory. + bool onlyReadsMemory() const; + void setOnlyReadsMemory(bool onlyReadsMemory = true); + + /// @brief Determine if the call cannot return. + bool doesNotReturn() const; + void setDoesNotReturn(bool doesNotReturn = true); + + /// @brief Determine if the call cannot unwind. 
+ bool doesNotThrow() const; + void setDoesNotThrow(bool doesNotThrow = true); + + /// getType - Return the type of the instruction that generated this call site + /// + const Type *getType() const { return getInstruction()->getType(); } + + /// isCall - true if a CallInst is enclosed. + /// Note that !isCall() does not mean it is an InvokeInst enclosed, + /// it also could signify a NULL Instruction pointer. + bool isCall() const { return I.getInt(); } + + /// isInvoke - true if a InvokeInst is enclosed. + /// + bool isInvoke() const { return getInstruction() && !I.getInt(); } + + /// getInstruction - Return the instruction this call site corresponds to + /// + Instruction *getInstruction() const { return I.getPointer(); } + + /// getCaller - Return the caller function for this call site + /// + Function *getCaller() const { return getInstruction() + ->getParent()->getParent(); } + + /// getCalledValue - Return the pointer to function that is being called... + /// + Value *getCalledValue() const { + assert(getInstruction() && "Not a call or invoke instruction!"); + return getInstruction()->getOperand(0); + } + + /// getCalledFunction - Return the function being called if this is a direct + /// call, otherwise return null (if it's an indirect call). + /// + Function *getCalledFunction() const { + return dyn_cast<Function>(getCalledValue()); + } + + /// setCalledFunction - Set the callee to the specified value... + /// + void setCalledFunction(Value *V) { + assert(getInstruction() && "Not a call or invoke instruction!"); + getInstruction()->setOperand(0, V); + } + + Value *getArgument(unsigned ArgNo) const { + assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!"); + return *(arg_begin()+ArgNo); + } + + void setArgument(unsigned ArgNo, Value* newVal) { + assert(getInstruction() && "Not a call or invoke instruction!"); + assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!"); + getInstruction()->setOperand(getArgumentOffset() + ArgNo, newVal); + } + + /// Given an operand number, returns the argument that corresponds to it. + /// OperandNo must be a valid operand number that actually corresponds to an + /// argument. + unsigned getArgumentNo(unsigned OperandNo) const { + assert(OperandNo >= getArgumentOffset() && "Operand number passed was not " + "a valid argument"); + return OperandNo - getArgumentOffset(); + } + + /// hasArgument - Returns true if this CallSite passes the given Value* as an + /// argument to the called function. + bool hasArgument(const Value *Arg) const; + + /// arg_iterator - The type of iterator to use when looping over actual + /// arguments at this call site... + typedef User::op_iterator arg_iterator; + + /// arg_begin/arg_end - Return iterators corresponding to the actual argument + /// list for a call site. 
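+  /// A typical traversal might look like this ('handleArg' is hypothetical):
+  ///
+  ///   for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+  ///        AI != AE; ++AI)
+  ///     handleArg(*AI);  // each element converts to the argument's Value*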
+ arg_iterator arg_begin() const { + assert(getInstruction() && "Not a call or invoke instruction!"); + // Skip non-arguments + return getInstruction()->op_begin() + getArgumentOffset(); + } + + arg_iterator arg_end() const { return getInstruction()->op_end(); } + bool arg_empty() const { return arg_end() == arg_begin(); } + unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); } + + bool operator<(const CallSite &CS) const { + return getInstruction() < CS.getInstruction(); + } + + bool isCallee(Value::use_iterator UI) const { + return getInstruction()->op_begin() == &UI.getUse(); + } + +private: + /// Returns the operand number of the first argument + unsigned getArgumentOffset() const { + if (isCall()) + return 1; // Skip Function + else + return 3; // Skip Function, BB, BB + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/Casting.h b/include/llvm/Support/Casting.h new file mode 100644 index 0000000..48988f8 --- /dev/null +++ b/include/llvm/Support/Casting.h @@ -0,0 +1,303 @@ +//===-- llvm/Support/Casting.h - Allow flexible, checked, casts -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(), +// and dyn_cast_or_null<X>() templates. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_CASTING_H +#define LLVM_SUPPORT_CASTING_H + +#include <cassert> + +namespace llvm { + +//===----------------------------------------------------------------------===// +// isa<x> Support Templates +//===----------------------------------------------------------------------===// + +template<typename FromCl> struct isa_impl_cl; + +// Define a template that can be specialized by smart pointers to reflect the +// fact that they are automatically dereferenced, and are not involved with the +// template selection process... the default implementation is a noop. +// +template<typename From> struct simplify_type { + typedef From SimpleType; // The real type this represents... + + // An accessor to get the real value... + static SimpleType &getSimplifiedValue(From &Val) { return Val; } +}; + +template<typename From> struct simplify_type<const From> { + typedef const From SimpleType; + static SimpleType &getSimplifiedValue(const From &Val) { + return simplify_type<From>::getSimplifiedValue(static_cast<From&>(Val)); + } +}; + + +// isa<X> - Return true if the parameter to the template is an instance of the +// template type argument. Used like this: +// +// if (isa<Type*>(myVal)) { ... } +// +template <typename To, typename From> +inline bool isa_impl(const From &Val) { + return To::classof(&Val); +} + +template<typename To, typename From, typename SimpleType> +struct isa_impl_wrap { + // When From != SimplifiedType, we can simplify the type some more by using + // the simplify_type template. + static bool doit(const From &Val) { + return isa_impl_cl<const SimpleType>::template + isa<To>(simplify_type<const From>::getSimplifiedValue(Val)); + } +}; + +template<typename To, typename FromTy> +struct isa_impl_wrap<To, const FromTy, const FromTy> { + // When From == SimpleType, we are as simple as we are going to get. 
+ static bool doit(const FromTy &Val) { + return isa_impl<To,FromTy>(Val); + } +}; + +// isa_impl_cl - Use class partial specialization to transform types to a single +// canonical form for isa_impl. +// +template<typename FromCl> +struct isa_impl_cl { + template<class ToCl> + static bool isa(const FromCl &Val) { + return isa_impl_wrap<ToCl,const FromCl, + typename simplify_type<const FromCl>::SimpleType>::doit(Val); + } +}; + +// Specialization used to strip const qualifiers off of the FromCl type... +template<typename FromCl> +struct isa_impl_cl<const FromCl> { + template<class ToCl> + static bool isa(const FromCl &Val) { + return isa_impl_cl<FromCl>::template isa<ToCl>(Val); + } +}; + +// Define pointer traits in terms of base traits... +template<class FromCl> +struct isa_impl_cl<FromCl*> { + template<class ToCl> + static bool isa(FromCl *Val) { + return isa_impl_cl<FromCl>::template isa<ToCl>(*Val); + } +}; + +// Define reference traits in terms of base traits... +template<class FromCl> +struct isa_impl_cl<FromCl&> { + template<class ToCl> + static bool isa(FromCl &Val) { + return isa_impl_cl<FromCl>::template isa<ToCl>(&Val); + } +}; + +template <class X, class Y> +inline bool isa(const Y &Val) { + return isa_impl_cl<Y>::template isa<X>(Val); +} + +//===----------------------------------------------------------------------===// +// cast<x> Support Templates +//===----------------------------------------------------------------------===// + +template<class To, class From> struct cast_retty; + + +// Calculate what type the 'cast' function should return, based on a requested +// type of To and a source type of From. +template<class To, class From> struct cast_retty_impl { + typedef To& ret_type; // Normal case, return Ty& +}; +template<class To, class From> struct cast_retty_impl<To, const From> { + typedef const To &ret_type; // Normal case, return Ty& +}; + +template<class To, class From> struct cast_retty_impl<To, From*> { + typedef To* ret_type; // Pointer arg case, return Ty* +}; + +template<class To, class From> struct cast_retty_impl<To, const From*> { + typedef const To* ret_type; // Constant pointer arg case, return const Ty* +}; + +template<class To, class From> struct cast_retty_impl<To, const From*const> { + typedef const To* ret_type; // Constant pointer arg case, return const Ty* +}; + + +template<class To, class From, class SimpleFrom> +struct cast_retty_wrap { + // When the simplified type and the from type are not the same, use the type + // simplifier to reduce the type, then reuse cast_retty_impl to get the + // resultant type. + typedef typename cast_retty<To, SimpleFrom>::ret_type ret_type; +}; + +template<class To, class FromTy> +struct cast_retty_wrap<To, FromTy, FromTy> { + // When the simplified type is equal to the from type, use it directly. + typedef typename cast_retty_impl<To,FromTy>::ret_type ret_type; +}; + +template<class To, class From> +struct cast_retty { + typedef typename cast_retty_wrap<To, From, + typename simplify_type<From>::SimpleType>::ret_type ret_type; +}; + +// Ensure the non-simple values are converted using the simplify_type template +// that may be specialized by smart pointers... +// +template<class To, class From, class SimpleFrom> struct cast_convert_val { + // This is not a simple type, use the template to simplify it... 
+ static typename cast_retty<To, From>::ret_type doit(const From &Val) { + return cast_convert_val<To, SimpleFrom, + typename simplify_type<SimpleFrom>::SimpleType>::doit( + simplify_type<From>::getSimplifiedValue(Val)); + } +}; + +template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> { + // This _is_ a simple type, just cast it. + static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) { + return reinterpret_cast<typename cast_retty<To, FromTy>::ret_type>( + const_cast<FromTy&>(Val)); + } +}; + + + +// cast<X> - Return the argument parameter cast to the specified type. This +// casting operator asserts that the type is correct, so it does not return null +// on failure. But it will correctly return NULL when the input is NULL. +// Used Like this: +// +// cast<Instruction>(myVal)->getParent() +// +template <class X, class Y> +inline typename cast_retty<X, Y>::ret_type cast(const Y &Val) { + assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!"); + return cast_convert_val<X, Y, + typename simplify_type<Y>::SimpleType>::doit(Val); +} + +// cast_or_null<X> - Functionally identical to cast, except that a null value is +// accepted. +// +template <class X, class Y> +inline typename cast_retty<X, Y*>::ret_type cast_or_null(Y *Val) { + if (Val == 0) return 0; + assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"); + return cast<X>(Val); +} + + +// dyn_cast<X> - Return the argument parameter cast to the specified type. This +// casting operator returns null if the argument is of the wrong type, so it can +// be used to test for a type as well as cast if successful. This should be +// used in the context of an if statement like this: +// +// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... } +// + +template <class X, class Y> +inline typename cast_retty<X, Y>::ret_type dyn_cast(const Y &Val) { + return isa<X>(Val) ? cast<X, Y>(Val) : 0; +} + +// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null +// value is accepted. +// +template <class X, class Y> +inline typename cast_retty<X, Y>::ret_type dyn_cast_or_null(const Y &Val) { + return (Val && isa<X>(Val)) ? cast<X, Y>(Val) : 0; +} + + +#ifdef DEBUG_CAST_OPERATORS +#include "llvm/Support/Streams.h" + +struct bar { + bar() {} +private: + bar(const bar &); +}; +struct foo { + void ext() const; + /* static bool classof(const bar *X) { + cerr << "Classof: " << X << "\n"; + return true; + }*/ +}; + +template <> inline bool isa_impl<foo,bar>(const bar &Val) { + cerr << "Classof: " << &Val << "\n"; + return true; +} + + +bar *fub(); +void test(bar &B1, const bar *B2) { + // test various configurations of const + const bar &B3 = B1; + const bar *const B4 = B2; + + // test isa + if (!isa<foo>(B1)) return; + if (!isa<foo>(B2)) return; + if (!isa<foo>(B3)) return; + if (!isa<foo>(B4)) return; + + // test cast + foo &F1 = cast<foo>(B1); + const foo *F3 = cast<foo>(B2); + const foo *F4 = cast<foo>(B2); + const foo &F8 = cast<foo>(B3); + const foo *F9 = cast<foo>(B4); + foo *F10 = cast<foo>(fub()); + + // test cast_or_null + const foo *F11 = cast_or_null<foo>(B2); + const foo *F12 = cast_or_null<foo>(B2); + const foo *F13 = cast_or_null<foo>(B4); + const foo *F14 = cast_or_null<foo>(fub()); // Shouldn't print. + + // These lines are errors... 
+ //foo *F20 = cast<foo>(B2); // Yields const foo* + //foo &F21 = cast<foo>(B3); // Yields const foo& + //foo *F22 = cast<foo>(B4); // Yields const foo* + //foo &F23 = cast_or_null<foo>(B1); + //const foo &F24 = cast_or_null<foo>(B3); +} + +bar *fub() { return 0; } +void main() { + bar B; + test(B, &B); +} + +#endif + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/CommandLine.h b/include/llvm/Support/CommandLine.h new file mode 100644 index 0000000..fa3b870 --- /dev/null +++ b/include/llvm/Support/CommandLine.h @@ -0,0 +1,1386 @@ +//===- llvm/Support/CommandLine.h - Command line handler --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class implements a command line argument processor that is useful when +// creating a tool. It provides a simple, minimalistic interface that is easily +// extensible and supports nonlocal (library) command line options. +// +// Note that rather than trying to figure out what this code does, you should +// read the library documentation located in docs/CommandLine.html or looks at +// the many example usages in tools/*/*.cpp +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_COMMANDLINE_H +#define LLVM_SUPPORT_COMMANDLINE_H + +#include "llvm/Support/type_traits.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/Compiler.h" +#include "llvm/ADT/SmallVector.h" +#include <cassert> +#include <climits> +#include <cstdarg> +#include <string> +#include <utility> +#include <vector> + +namespace llvm { + +/// cl Namespace - This namespace contains all of the command line option +/// processing machinery. It is intentionally a short name to make qualified +/// usage concise. +namespace cl { + +//===----------------------------------------------------------------------===// +// ParseCommandLineOptions - Command line option processing entry point. +// +void ParseCommandLineOptions(int argc, char **argv, + const char *Overview = 0, + bool ReadResponseFiles = false); + +//===----------------------------------------------------------------------===// +// ParseEnvironmentOptions - Environment variable option processing alternate +// entry point. +// +void ParseEnvironmentOptions(const char *progName, const char *envvar, + const char *Overview = 0, + bool ReadResponseFiles = false); + +///===---------------------------------------------------------------------===// +/// SetVersionPrinter - Override the default (LLVM specific) version printer +/// used to print out the version when --version is given +/// on the command line. This allows other systems using the +/// CommandLine utilities to print their own version string. +void SetVersionPrinter(void (*func)()); + + +// MarkOptionsChanged - Internal helper function. 
+void MarkOptionsChanged(); + +//===----------------------------------------------------------------------===// +// Flags permitted to be passed to command line arguments +// + +enum NumOccurrences { // Flags for the number of occurrences allowed + Optional = 0x01, // Zero or One occurrence + ZeroOrMore = 0x02, // Zero or more occurrences allowed + Required = 0x03, // One occurrence required + OneOrMore = 0x04, // One or more occurrences required + + // ConsumeAfter - Indicates that this option is fed anything that follows the + // last positional argument required by the application (it is an error if + // there are zero positional arguments, and a ConsumeAfter option is used). + // Thus, for example, all arguments to LLI are processed until a filename is + // found. Once a filename is found, all of the succeeding arguments are + // passed, unprocessed, to the ConsumeAfter option. + // + ConsumeAfter = 0x05, + + OccurrencesMask = 0x07 +}; + +enum ValueExpected { // Is a value required for the option? + ValueOptional = 0x08, // The value can appear... or not + ValueRequired = 0x10, // The value is required to appear! + ValueDisallowed = 0x18, // A value may not be specified (for flags) + ValueMask = 0x18 +}; + +enum OptionHidden { // Control whether -help shows this option + NotHidden = 0x20, // Option included in --help & --help-hidden + Hidden = 0x40, // -help doesn't, but --help-hidden does + ReallyHidden = 0x60, // Neither --help nor --help-hidden show this arg + HiddenMask = 0x60 +}; + +// Formatting flags - This controls special features that the option might have +// that cause it to be parsed differently... +// +// Prefix - This option allows arguments that are otherwise unrecognized to be +// matched by options that are a prefix of the actual value. This is useful for +// cases like a linker, where options are typically of the form '-lfoo' or +// '-L../../include' where -l or -L are the actual flags. When prefix is +// enabled, and used, the value for the flag comes from the suffix of the +// argument. +// +// Grouping - With this option enabled, multiple letter options are allowed to +// bunch together with only a single hyphen for the whole group. This allows +// emulation of the behavior that ls uses for example: ls -la === ls -l -a +// + +enum FormattingFlags { + NormalFormatting = 0x000, // Nothing special + Positional = 0x080, // Is a positional argument, no '-' required + Prefix = 0x100, // Can this option directly prefix its value? + Grouping = 0x180, // Can this option group with other options? + FormattingMask = 0x180 // Union of the above flags. +}; + +enum MiscFlags { // Miscellaneous flags to adjust argument + CommaSeparated = 0x200, // Should this cl::list split between commas? + PositionalEatsArgs = 0x400, // Should this positional cl::list eat -args? + Sink = 0x800, // Should this cl::list eat all unknown options? + MiscMask = 0xE00 // Union of the above flags. +}; + + + +//===----------------------------------------------------------------------===// +// Option Base class +// +class alias; +class Option { + friend class alias; + + // handleOccurrences - Overriden by subclasses to handle the value passed into + // an argument. Should return true if there was an error processing the + // argument and the program should exit. 
+ // + virtual bool handleOccurrence(unsigned pos, const char *ArgName, + const std::string &Arg) = 0; + + virtual enum ValueExpected getValueExpectedFlagDefault() const { + return ValueOptional; + } + + // Out of line virtual function to provide home for the class. + virtual void anchor(); + + int NumOccurrences; // The number of times specified + int Flags; // Flags for the argument + unsigned Position; // Position of last occurrence of the option + unsigned AdditionalVals;// Greater than 0 for multi-valued option. + Option *NextRegistered; // Singly linked list of registered options. +public: + const char *ArgStr; // The argument string itself (ex: "help", "o") + const char *HelpStr; // The descriptive text message for --help + const char *ValueStr; // String describing what the value of this option is + + inline enum NumOccurrences getNumOccurrencesFlag() const { + return static_cast<enum NumOccurrences>(Flags & OccurrencesMask); + } + inline enum ValueExpected getValueExpectedFlag() const { + int VE = Flags & ValueMask; + return VE ? static_cast<enum ValueExpected>(VE) + : getValueExpectedFlagDefault(); + } + inline enum OptionHidden getOptionHiddenFlag() const { + return static_cast<enum OptionHidden>(Flags & HiddenMask); + } + inline enum FormattingFlags getFormattingFlag() const { + return static_cast<enum FormattingFlags>(Flags & FormattingMask); + } + inline unsigned getMiscFlags() const { + return Flags & MiscMask; + } + inline unsigned getPosition() const { return Position; } + inline unsigned getNumAdditionalVals() const { return AdditionalVals; } + + // hasArgStr - Return true if the argstr != "" + bool hasArgStr() const { return ArgStr[0] != 0; } + + //-------------------------------------------------------------------------=== + // Accessor functions set by OptionModifiers + // + void setArgStr(const char *S) { ArgStr = S; } + void setDescription(const char *S) { HelpStr = S; } + void setValueStr(const char *S) { ValueStr = S; } + + void setFlag(unsigned Flag, unsigned FlagMask) { + Flags &= ~FlagMask; + Flags |= Flag; + } + + void setNumOccurrencesFlag(enum NumOccurrences Val) { + setFlag(Val, OccurrencesMask); + } + void setValueExpectedFlag(enum ValueExpected Val) { setFlag(Val, ValueMask); } + void setHiddenFlag(enum OptionHidden Val) { setFlag(Val, HiddenMask); } + void setFormattingFlag(enum FormattingFlags V) { setFlag(V, FormattingMask); } + void setMiscFlag(enum MiscFlags M) { setFlag(M, M); } + void setPosition(unsigned pos) { Position = pos; } +protected: + explicit Option(unsigned DefaultFlags) + : NumOccurrences(0), Flags(DefaultFlags | NormalFormatting), Position(0), + AdditionalVals(0), NextRegistered(0), + ArgStr(""), HelpStr(""), ValueStr("") { + assert(getNumOccurrencesFlag() != 0 && + getOptionHiddenFlag() != 0 && "Not all default flags specified!"); + } + + inline void setNumAdditionalVals(unsigned n) + { AdditionalVals = n; } +public: + // addArgument - Register this argument with the commandline system. + // + void addArgument(); + + Option *getNextRegisteredOption() const { return NextRegistered; } + + // Return the width of the option tag for printing... + virtual size_t getOptionWidth() const = 0; + + // printOptionInfo - Print out information about this option. The + // to-be-maintained width is specified. 
+ // + virtual void printOptionInfo(size_t GlobalWidth) const = 0; + + virtual void getExtraOptionNames(std::vector<const char*> &) {} + + // addOccurrence - Wrapper around handleOccurrence that enforces Flags + // + bool addOccurrence(unsigned pos, const char *ArgName, + const std::string &Value, bool MultiArg = false); + + // Prints option name followed by message. Always returns true. + bool error(std::string Message, const char *ArgName = 0); + +public: + inline int getNumOccurrences() const { return NumOccurrences; } + virtual ~Option() {} +}; + + +//===----------------------------------------------------------------------===// +// Command line option modifiers that can be used to modify the behavior of +// command line option parsers... +// + +// desc - Modifier to set the description shown in the --help output... +struct desc { + const char *Desc; + desc(const char *Str) : Desc(Str) {} + void apply(Option &O) const { O.setDescription(Desc); } +}; + +// value_desc - Modifier to set the value description shown in the --help +// output... +struct value_desc { + const char *Desc; + value_desc(const char *Str) : Desc(Str) {} + void apply(Option &O) const { O.setValueStr(Desc); } +}; + +// init - Specify a default (initial) value for the command line argument, if +// the default constructor for the argument type does not give you what you +// want. This is only valid on "opt" arguments, not on "list" arguments. +// +template<class Ty> +struct initializer { + const Ty &Init; + initializer(const Ty &Val) : Init(Val) {} + + template<class Opt> + void apply(Opt &O) const { O.setInitialValue(Init); } +}; + +template<class Ty> +initializer<Ty> init(const Ty &Val) { + return initializer<Ty>(Val); +} + + +// location - Allow the user to specify which external variable they want to +// store the results of the command line argument processing into, if they don't +// want to store it in the option itself. +// +template<class Ty> +struct LocationClass { + Ty &Loc; + LocationClass(Ty &L) : Loc(L) {} + + template<class Opt> + void apply(Opt &O) const { O.setLocation(O, Loc); } +}; + +template<class Ty> +LocationClass<Ty> location(Ty &L) { return LocationClass<Ty>(L); } + + +//===----------------------------------------------------------------------===// +// Enum valued command line option +// +#define clEnumVal(ENUMVAL, DESC) #ENUMVAL, int(ENUMVAL), DESC +#define clEnumValN(ENUMVAL, FLAGNAME, DESC) FLAGNAME, int(ENUMVAL), DESC +#define clEnumValEnd (reinterpret_cast<void*>(0)) + +// values - For custom data types, allow specifying a group of values together +// as the values that go into the mapping that the option handler uses. Note +// that the values list must always have a 0 at the end of the list to indicate +// that the list has ended. +// +template<class DataType> +class ValuesClass { + // Use a vector instead of a map, because the lists should be short, + // the overhead is less, and most importantly, it keeps them in the order + // inserted so we can print our option out nicely. + SmallVector<std::pair<const char *, std::pair<int, const char *> >,4> Values; + void processValues(va_list Vals); +public: + ValuesClass(const char *EnumName, DataType Val, const char *Desc, + va_list ValueArgs) { + // Insert the first value, which is required. + Values.push_back(std::make_pair(EnumName, std::make_pair(Val, Desc))); + + // Process the varargs portion of the values... 
+ while (const char *enumName = va_arg(ValueArgs, const char *)) { + DataType EnumVal = static_cast<DataType>(va_arg(ValueArgs, int)); + const char *EnumDesc = va_arg(ValueArgs, const char *); + Values.push_back(std::make_pair(enumName, // Add value to value map + std::make_pair(EnumVal, EnumDesc))); + } + } + + template<class Opt> + void apply(Opt &O) const { + for (unsigned i = 0, e = static_cast<unsigned>(Values.size()); + i != e; ++i) + O.getParser().addLiteralOption(Values[i].first, Values[i].second.first, + Values[i].second.second); + } +}; + +template<class DataType> +ValuesClass<DataType> END_WITH_NULL values(const char *Arg, DataType Val, + const char *Desc, ...) { + va_list ValueArgs; + va_start(ValueArgs, Desc); + ValuesClass<DataType> Vals(Arg, Val, Desc, ValueArgs); + va_end(ValueArgs); + return Vals; +} + + +//===----------------------------------------------------------------------===// +// parser class - Parameterizable parser for different data types. By default, +// known data types (string, int, bool) have specialized parsers, that do what +// you would expect. The default parser, used for data types that are not +// built-in, uses a mapping table to map specific options to values, which is +// used, among other things, to handle enum types. + +//-------------------------------------------------- +// generic_parser_base - This class holds all the non-generic code that we do +// not need replicated for every instance of the generic parser. This also +// allows us to put stuff into CommandLine.cpp +// +struct generic_parser_base { + virtual ~generic_parser_base() {} // Base class should have virtual-dtor + + // getNumOptions - Virtual function implemented by generic subclass to + // indicate how many entries are in Values. + // + virtual unsigned getNumOptions() const = 0; + + // getOption - Return option name N. + virtual const char *getOption(unsigned N) const = 0; + + // getDescription - Return description N + virtual const char *getDescription(unsigned N) const = 0; + + // Return the width of the option tag for printing... + virtual size_t getOptionWidth(const Option &O) const; + + // printOptionInfo - Print out information about this option. The + // to-be-maintained width is specified. + // + virtual void printOptionInfo(const Option &O, size_t GlobalWidth) const; + + void initialize(Option &O) { + // All of the modifiers for the option have been processed by now, so the + // argstr field should be stable, copy it down now. + // + hasArgStr = O.hasArgStr(); + } + + void getExtraOptionNames(std::vector<const char*> &OptionNames) { + // If there has been no argstr specified, that means that we need to add an + // argument for every possible option. This ensures that our options are + // vectored to us. + if (!hasArgStr) + for (unsigned i = 0, e = getNumOptions(); i != e; ++i) + OptionNames.push_back(getOption(i)); + } + + + enum ValueExpected getValueExpectedFlagDefault() const { + // If there is an ArgStr specified, then we are of the form: + // + // -opt=O2 or -opt O2 or -optO2 + // + // In which case, the value is required. Otherwise if an arg str has not + // been specified, we are of the form: + // + // -O2 or O2 or -la (where -l and -a are separate options) + // + // If this is the case, we cannot allow a value. + // + if (hasArgStr) + return ValueRequired; + else + return ValueDisallowed; + } + + // findOption - Return the option number corresponding to the specified + // argument string. If the option is not found, getNumOptions() is returned. 
+ // + unsigned findOption(const char *Name); + +protected: + bool hasArgStr; +}; + +// Default parser implementation - This implementation depends on having a +// mapping of recognized options to values of some sort. In addition to this, +// each entry in the mapping also tracks a help message that is printed with the +// command line option for --help. Because this is a simple mapping parser, the +// data type can be any unsupported type. +// +template <class DataType> +class parser : public generic_parser_base { +protected: + SmallVector<std::pair<const char *, + std::pair<DataType, const char *> >, 8> Values; +public: + typedef DataType parser_data_type; + + // Implement virtual functions needed by generic_parser_base + unsigned getNumOptions() const { return unsigned(Values.size()); } + const char *getOption(unsigned N) const { return Values[N].first; } + const char *getDescription(unsigned N) const { + return Values[N].second.second; + } + + // parse - Return true on error. + bool parse(Option &O, const char *ArgName, const std::string &Arg, + DataType &V) { + std::string ArgVal; + if (hasArgStr) + ArgVal = Arg; + else + ArgVal = ArgName; + + for (unsigned i = 0, e = static_cast<unsigned>(Values.size()); + i != e; ++i) + if (ArgVal == Values[i].first) { + V = Values[i].second.first; + return false; + } + + return O.error(": Cannot find option named '" + ArgVal + "'!"); + } + + /// addLiteralOption - Add an entry to the mapping table. + /// + template <class DT> + void addLiteralOption(const char *Name, const DT &V, const char *HelpStr) { + assert(findOption(Name) == Values.size() && "Option already exists!"); + Values.push_back(std::make_pair(Name, + std::make_pair(static_cast<DataType>(V),HelpStr))); + MarkOptionsChanged(); + } + + /// removeLiteralOption - Remove the specified option. + /// + void removeLiteralOption(const char *Name) { + unsigned N = findOption(Name); + assert(N != Values.size() && "Option not found!"); + Values.erase(Values.begin()+N); + } +}; + +//-------------------------------------------------- +// basic_parser - Super class of parsers to provide boilerplate code +// +struct basic_parser_impl { // non-template implementation of basic_parser<t> + virtual ~basic_parser_impl() {} + + enum ValueExpected getValueExpectedFlagDefault() const { + return ValueRequired; + } + + void getExtraOptionNames(std::vector<const char*> &) {} + + void initialize(Option &) {} + + // Return the width of the option tag for printing... + size_t getOptionWidth(const Option &O) const; + + // printOptionInfo - Print out information about this option. The + // to-be-maintained width is specified. + // + void printOptionInfo(const Option &O, size_t GlobalWidth) const; + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "value"; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +// basic_parser - The real basic parser is just a template wrapper that provides +// a typedef for the provided data type. +// +template<class DataType> +struct basic_parser : public basic_parser_impl { + typedef DataType parser_data_type; +}; + +//-------------------------------------------------- +// parser<bool> +// +template<> +class parser<bool> : public basic_parser<bool> { + const char *ArgStr; +public: + + // parse - Return true on error. 
+ bool parse(Option &O, const char *ArgName, const std::string &Arg, bool &Val); + + template <class Opt> + void initialize(Opt &O) { + ArgStr = O.ArgStr; + } + + enum ValueExpected getValueExpectedFlagDefault() const { + return ValueOptional; + } + + // getValueName - Do not print =<value> at all. + virtual const char *getValueName() const { return 0; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<bool>); + +//-------------------------------------------------- +// parser<boolOrDefault> +enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE }; +template<> +class parser<boolOrDefault> : public basic_parser<boolOrDefault> { +public: + // parse - Return true on error. + bool parse(Option &O, const char *ArgName, const std::string &Arg, + boolOrDefault &Val); + + enum ValueExpected getValueExpectedFlagDefault() const { + return ValueOptional; + } + + // getValueName - Do not print =<value> at all. + virtual const char *getValueName() const { return 0; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<boolOrDefault>); + +//-------------------------------------------------- +// parser<int> +// +template<> +class parser<int> : public basic_parser<int> { +public: + // parse - Return true on error. + bool parse(Option &O, const char *ArgName, const std::string &Arg, int &Val); + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "int"; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<int>); + + +//-------------------------------------------------- +// parser<unsigned> +// +template<> +class parser<unsigned> : public basic_parser<unsigned> { +public: + // parse - Return true on error. + bool parse(Option &O, const char *AN, const std::string &Arg, unsigned &Val); + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "uint"; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<unsigned>); + +//-------------------------------------------------- +// parser<double> +// +template<> +class parser<double> : public basic_parser<double> { +public: + // parse - Return true on error. + bool parse(Option &O, const char *AN, const std::string &Arg, double &Val); + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "number"; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<double>); + +//-------------------------------------------------- +// parser<float> +// +template<> +class parser<float> : public basic_parser<float> { +public: + // parse - Return true on error. + bool parse(Option &O, const char *AN, const std::string &Arg, float &Val); + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "number"; } + + // An out-of-line virtual method to provide a 'home' for this class. 
+ virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<float>); + +//-------------------------------------------------- +// parser<std::string> +// +template<> +class parser<std::string> : public basic_parser<std::string> { +public: + // parse - Return true on error. + bool parse(Option &, const char *, const std::string &Arg, + std::string &Value) { + Value = Arg; + return false; + } + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "string"; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<std::string>); + +//-------------------------------------------------- +// parser<char> +// +template<> +class parser<char> : public basic_parser<char> { +public: + // parse - Return true on error. + bool parse(Option &, const char *, const std::string &Arg, + char &Value) { + Value = Arg[0]; + return false; + } + + // getValueName - Overload in subclass to provide a better default value. + virtual const char *getValueName() const { return "char"; } + + // An out-of-line virtual method to provide a 'home' for this class. + virtual void anchor(); +}; + +EXTERN_TEMPLATE_INSTANTIATION(class basic_parser<char>); + +//===----------------------------------------------------------------------===// +// applicator class - This class is used because we must use partial +// specialization to handle literal string arguments specially (const char* does +// not correctly respond to the apply method). Because the syntax to use this +// is a pain, we have the 'apply' method below to handle the nastiness... +// +template<class Mod> struct applicator { + template<class Opt> + static void opt(const Mod &M, Opt &O) { M.apply(O); } +}; + +// Handle const char* as a special case... +template<unsigned n> struct applicator<char[n]> { + template<class Opt> + static void opt(const char *Str, Opt &O) { O.setArgStr(Str); } +}; +template<unsigned n> struct applicator<const char[n]> { + template<class Opt> + static void opt(const char *Str, Opt &O) { O.setArgStr(Str); } +}; +template<> struct applicator<const char*> { + template<class Opt> + static void opt(const char *Str, Opt &O) { O.setArgStr(Str); } +}; + +template<> struct applicator<NumOccurrences> { + static void opt(NumOccurrences NO, Option &O) { O.setNumOccurrencesFlag(NO); } +}; +template<> struct applicator<ValueExpected> { + static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); } +}; +template<> struct applicator<OptionHidden> { + static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); } +}; +template<> struct applicator<FormattingFlags> { + static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); } +}; +template<> struct applicator<MiscFlags> { + static void opt(MiscFlags MF, Option &O) { O.setMiscFlag(MF); } +}; + +// apply method - Apply a modifier to an option in a type safe way. +template<class Mod, class Opt> +void apply(const Mod &M, Opt *O) { + applicator<Mod>::opt(M, *O); +} + + +//===----------------------------------------------------------------------===// +// opt_storage class + +// Default storage class definition: external storage. This implementation +// assumes the user will specify a variable to store the data into with the +// cl::location(x) modifier. +// +template<class DataType, bool ExternalStorage, bool isClass> +class opt_storage { + DataType *Location; // Where to store the object... 
+ + void check() const { + assert(Location != 0 && "cl::location(...) not specified for a command " + "line option with external storage, " + "or cl::init specified before cl::location()!!"); + } +public: + opt_storage() : Location(0) {} + + bool setLocation(Option &O, DataType &L) { + if (Location) + return O.error(": cl::location(x) specified more than once!"); + Location = &L; + return false; + } + + template<class T> + void setValue(const T &V) { + check(); + *Location = V; + } + + DataType &getValue() { check(); return *Location; } + const DataType &getValue() const { check(); return *Location; } +}; + + +// Define how to hold a class type object, such as a string. Since we can +// inherit from a class, we do so. This makes us exactly compatible with the +// object in all cases that it is used. +// +template<class DataType> +class opt_storage<DataType,false,true> : public DataType { +public: + template<class T> + void setValue(const T &V) { DataType::operator=(V); } + + DataType &getValue() { return *this; } + const DataType &getValue() const { return *this; } +}; + +// Define a partial specialization to handle things we cannot inherit from. In +// this case, we store an instance through containment, and overload operators +// to get at the value. +// +template<class DataType> +class opt_storage<DataType, false, false> { +public: + DataType Value; + + // Make sure we initialize the value with the default constructor for the + // type. + opt_storage() : Value(DataType()) {} + + template<class T> + void setValue(const T &V) { Value = V; } + DataType &getValue() { return Value; } + DataType getValue() const { return Value; } + + // If the datatype is a pointer, support -> on it. + DataType operator->() const { return Value; } +}; + + +//===----------------------------------------------------------------------===// +// opt - A scalar command line option. +// +template <class DataType, bool ExternalStorage = false, + class ParserClass = parser<DataType> > +class opt : public Option, + public opt_storage<DataType, ExternalStorage, + is_class<DataType>::value> { + ParserClass Parser; + + virtual bool handleOccurrence(unsigned pos, const char *ArgName, + const std::string &Arg) { + typename ParserClass::parser_data_type Val = + typename ParserClass::parser_data_type(); + if (Parser.parse(*this, ArgName, Arg, Val)) + return true; // Parse error! + this->setValue(Val); + this->setPosition(pos); + return false; + } + + virtual enum ValueExpected getValueExpectedFlagDefault() const { + return Parser.getValueExpectedFlagDefault(); + } + virtual void getExtraOptionNames(std::vector<const char*> &OptionNames) { + return Parser.getExtraOptionNames(OptionNames); + } + + // Forward printing stuff to the parser... + virtual size_t getOptionWidth() const {return Parser.getOptionWidth(*this);} + virtual void printOptionInfo(size_t GlobalWidth) const { + Parser.printOptionInfo(*this, GlobalWidth); + } + + void done() { + addArgument(); + Parser.initialize(*this); + } +public: + // setInitialValue - Used by the cl::init modifier... + void setInitialValue(const DataType &V) { this->setValue(V); } + + ParserClass &getParser() { return Parser; } + + operator DataType() const { return this->getValue(); } + + template<class T> + DataType &operator=(const T &Val) { + this->setValue(Val); + return this->getValue(); + } + + // One option... + template<class M0t> + explicit opt(const M0t &M0) : Option(Optional | NotHidden) { + apply(M0, this); + done(); + } + + // Two options... 
+ template<class M0t, class M1t> + opt(const M0t &M0, const M1t &M1) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); + done(); + } + + // Three options... + template<class M0t, class M1t, class M2t> + opt(const M0t &M0, const M1t &M1, + const M2t &M2) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); + done(); + } + // Four options... + template<class M0t, class M1t, class M2t, class M3t> + opt(const M0t &M0, const M1t &M1, const M2t &M2, + const M3t &M3) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + done(); + } + // Five options... + template<class M0t, class M1t, class M2t, class M3t, class M4t> + opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); + done(); + } + // Six options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t> + opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); + done(); + } + // Seven options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t, class M6t> + opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5, + const M6t &M6) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); apply(M6, this); + done(); + } + // Eight options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t, class M6t, class M7t> + opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5, const M6t &M6, + const M7t &M7) : Option(Optional | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this); + done(); + } +}; + +EXTERN_TEMPLATE_INSTANTIATION(class opt<unsigned>); +EXTERN_TEMPLATE_INSTANTIATION(class opt<int>); +EXTERN_TEMPLATE_INSTANTIATION(class opt<std::string>); +EXTERN_TEMPLATE_INSTANTIATION(class opt<char>); +EXTERN_TEMPLATE_INSTANTIATION(class opt<bool>); + +//===----------------------------------------------------------------------===// +// list_storage class + +// Default storage class definition: external storage. This implementation +// assumes the user will specify a variable to store the data into with the +// cl::location(x) modifier. +// +template<class DataType, class StorageClass> +class list_storage { + StorageClass *Location; // Where to store the object... + +public: + list_storage() : Location(0) {} + + bool setLocation(Option &O, StorageClass &L) { + if (Location) + return O.error(": cl::location(x) specified more than once!"); + Location = &L; + return false; + } + + template<class T> + void addValue(const T &V) { + assert(Location != 0 && "cl::location(...) not specified for a command " + "line option with external storage!"); + Location->push_back(V); + } +}; + + +// Define how to hold a class type object, such as a string. Since we can +// inherit from a class, we do so. This makes us exactly compatible with the +// object in all cases that it is used. 
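The *_storage classes in this header distinguish internal storage (the default, where the option object itself holds the parsed value) from external storage, selected by the ExternalStorage template argument together with cl::location(...). A minimal sketch of the external form, with the DebugFlag global and the option name invented for illustration:

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// An ordinary global that other code can test cheaply, without ever
// seeing the cl::opt object that fills it in during option parsing.
bool DebugFlag;

static cl::opt<bool, true>                 // 'true' selects external storage
Debug("debug", cl::desc("Enable debug output"),
      cl::Hidden, cl::location(DebugFlag));

Note the check() assertion in opt_storage above: if cl::init is combined with external storage, it must appear after cl::location, otherwise the initial value would be written through a still-null Location pointer.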
+// +template<class DataType> +class list_storage<DataType, bool> : public std::vector<DataType> { +public: + template<class T> + void addValue(const T &V) { push_back(V); } +}; + + +//===----------------------------------------------------------------------===// +// list - A list of command line options. +// +template <class DataType, class Storage = bool, + class ParserClass = parser<DataType> > +class list : public Option, public list_storage<DataType, Storage> { + std::vector<unsigned> Positions; + ParserClass Parser; + + virtual enum ValueExpected getValueExpectedFlagDefault() const { + return Parser.getValueExpectedFlagDefault(); + } + virtual void getExtraOptionNames(std::vector<const char*> &OptionNames) { + return Parser.getExtraOptionNames(OptionNames); + } + + virtual bool handleOccurrence(unsigned pos, const char *ArgName, + const std::string &Arg) { + typename ParserClass::parser_data_type Val = + typename ParserClass::parser_data_type(); + if (Parser.parse(*this, ArgName, Arg, Val)) + return true; // Parse Error! + addValue(Val); + setPosition(pos); + Positions.push_back(pos); + return false; + } + + // Forward printing stuff to the parser... + virtual size_t getOptionWidth() const {return Parser.getOptionWidth(*this);} + virtual void printOptionInfo(size_t GlobalWidth) const { + Parser.printOptionInfo(*this, GlobalWidth); + } + + void done() { + addArgument(); + Parser.initialize(*this); + } +public: + ParserClass &getParser() { return Parser; } + + unsigned getPosition(unsigned optnum) const { + assert(optnum < this->size() && "Invalid option index"); + return Positions[optnum]; + } + + void setNumAdditionalVals(unsigned n) { + Option::setNumAdditionalVals(n); + } + + // One option... + template<class M0t> + explicit list(const M0t &M0) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); + done(); + } + // Two options... + template<class M0t, class M1t> + list(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); + done(); + } + // Three options... + template<class M0t, class M1t, class M2t> + list(const M0t &M0, const M1t &M1, const M2t &M2) + : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); + done(); + } + // Four options... + template<class M0t, class M1t, class M2t, class M3t> + list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3) + : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + done(); + } + // Five options... + template<class M0t, class M1t, class M2t, class M3t, class M4t> + list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); + done(); + } + // Six options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t> + list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); + done(); + } + // Seven options... 
+ template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t, class M6t> + list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5, const M6t &M6) + : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); apply(M6, this); + done(); + } + // Eight options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t, class M6t, class M7t> + list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5, const M6t &M6, + const M7t &M7) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this); + done(); + } +}; + +// multi_arg - Modifier to set the number of additional values. +struct multi_val { + unsigned AdditionalVals; + explicit multi_val(unsigned N) : AdditionalVals(N) {} + + template <typename D, typename S, typename P> + void apply(list<D, S, P> &L) const { L.setNumAdditionalVals(AdditionalVals); } +}; + + +//===----------------------------------------------------------------------===// +// bits_storage class + +// Default storage class definition: external storage. This implementation +// assumes the user will specify a variable to store the data into with the +// cl::location(x) modifier. +// +template<class DataType, class StorageClass> +class bits_storage { + unsigned *Location; // Where to store the bits... + + template<class T> + static unsigned Bit(const T &V) { + unsigned BitPos = reinterpret_cast<unsigned>(V); + assert(BitPos < sizeof(unsigned) * CHAR_BIT && + "enum exceeds width of bit vector!"); + return 1 << BitPos; + } + +public: + bits_storage() : Location(0) {} + + bool setLocation(Option &O, unsigned &L) { + if (Location) + return O.error(": cl::location(x) specified more than once!"); + Location = &L; + return false; + } + + template<class T> + void addValue(const T &V) { + assert(Location != 0 && "cl::location(...) not specified for a command " + "line option with external storage!"); + *Location |= Bit(V); + } + + unsigned getBits() { return *Location; } + + template<class T> + bool isSet(const T &V) { + return (*Location & Bit(V)) != 0; + } +}; + + +// Define how to hold bits. Since we can inherit from a class, we do so. +// This makes us exactly compatible with the bits in all cases that it is used. +// +template<class DataType> +class bits_storage<DataType, bool> { + unsigned Bits; // Where to store the bits... + + template<class T> + static unsigned Bit(const T &V) { + unsigned BitPos = reinterpret_cast<unsigned>(V); + assert(BitPos < sizeof(unsigned) * CHAR_BIT && + "enum exceeds width of bit vector!"); + return 1 << BitPos; + } + +public: + template<class T> + void addValue(const T &V) { + Bits |= Bit(V); + } + + unsigned getBits() { return Bits; } + + template<class T> + bool isSet(const T &V) { + return (Bits & Bit(V)) != 0; + } +}; + + +//===----------------------------------------------------------------------===// +// bits - A bit vector of command options. 
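Before the definition that follows, a short sketch of how cl::bits is usually combined with the clEnumVal machinery declared earlier in this header; the Opts enumeration and its members are invented for illustration:

#include "llvm/Support/CommandLine.h"
using namespace llvm;

enum Opts { dce, constprop, inlining, strip };   // bit positions 0..3

static cl::bits<Opts>
OptimizationBits(cl::desc("Available optimizations:"),
                 cl::values(clEnumVal(dce,       "Dead Code Elimination"),
                            clEnumVal(constprop, "Constant Propagation"),
                            clEnumVal(inlining,  "Procedure Integration"),
                            clEnumVal(strip,     "Strip Symbols"),
                            clEnumValEnd));

// With no explicit option name, each enumerator becomes its own flag, so after
// 'tool -dce -strip' has been parsed:
//   OptimizationBits.isSet(dce)    // true
//   OptimizationBits.isSet(strip)  // true
//   OptimizationBits.getBits()     // (1 << dce) | (1 << strip)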
+// +template <class DataType, class Storage = bool, + class ParserClass = parser<DataType> > +class bits : public Option, public bits_storage<DataType, Storage> { + std::vector<unsigned> Positions; + ParserClass Parser; + + virtual enum ValueExpected getValueExpectedFlagDefault() const { + return Parser.getValueExpectedFlagDefault(); + } + virtual void getExtraOptionNames(std::vector<const char*> &OptionNames) { + return Parser.getExtraOptionNames(OptionNames); + } + + virtual bool handleOccurrence(unsigned pos, const char *ArgName, + const std::string &Arg) { + typename ParserClass::parser_data_type Val = + typename ParserClass::parser_data_type(); + if (Parser.parse(*this, ArgName, Arg, Val)) + return true; // Parse Error! + addValue(Val); + setPosition(pos); + Positions.push_back(pos); + return false; + } + + // Forward printing stuff to the parser... + virtual size_t getOptionWidth() const {return Parser.getOptionWidth(*this);} + virtual void printOptionInfo(size_t GlobalWidth) const { + Parser.printOptionInfo(*this, GlobalWidth); + } + + void done() { + addArgument(); + Parser.initialize(*this); + } +public: + ParserClass &getParser() { return Parser; } + + unsigned getPosition(unsigned optnum) const { + assert(optnum < this->size() && "Invalid option index"); + return Positions[optnum]; + } + + // One option... + template<class M0t> + explicit bits(const M0t &M0) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); + done(); + } + // Two options... + template<class M0t, class M1t> + bits(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); + done(); + } + // Three options... + template<class M0t, class M1t, class M2t> + bits(const M0t &M0, const M1t &M1, const M2t &M2) + : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); + done(); + } + // Four options... + template<class M0t, class M1t, class M2t, class M3t> + bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3) + : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + done(); + } + // Five options... + template<class M0t, class M1t, class M2t, class M3t, class M4t> + bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); + done(); + } + // Six options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t> + bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); + done(); + } + // Seven options... + template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t, class M6t> + bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5, const M6t &M6) + : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); apply(M6, this); + done(); + } + // Eight options... 
+ template<class M0t, class M1t, class M2t, class M3t, + class M4t, class M5t, class M6t, class M7t> + bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, + const M4t &M4, const M5t &M5, const M6t &M6, + const M7t &M7) : Option(ZeroOrMore | NotHidden) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this); + done(); + } +}; + +//===----------------------------------------------------------------------===// +// Aliased command line option (alias this name to a preexisting name) +// + +class alias : public Option { + Option *AliasFor; + virtual bool handleOccurrence(unsigned pos, const char * /*ArgName*/, + const std::string &Arg) { + return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg); + } + // Handle printing stuff... + virtual size_t getOptionWidth() const; + virtual void printOptionInfo(size_t GlobalWidth) const; + + void done() { + if (!hasArgStr()) + error(": cl::alias must have argument name specified!"); + if (AliasFor == 0) + error(": cl::alias must have an cl::aliasopt(option) specified!"); + addArgument(); + } +public: + void setAliasFor(Option &O) { + if (AliasFor) + error(": cl::alias must only have one cl::aliasopt(...) specified!"); + AliasFor = &O; + } + + // One option... + template<class M0t> + explicit alias(const M0t &M0) : Option(Optional | Hidden), AliasFor(0) { + apply(M0, this); + done(); + } + // Two options... + template<class M0t, class M1t> + alias(const M0t &M0, const M1t &M1) : Option(Optional | Hidden), AliasFor(0) { + apply(M0, this); apply(M1, this); + done(); + } + // Three options... + template<class M0t, class M1t, class M2t> + alias(const M0t &M0, const M1t &M1, const M2t &M2) + : Option(Optional | Hidden), AliasFor(0) { + apply(M0, this); apply(M1, this); apply(M2, this); + done(); + } + // Four options... + template<class M0t, class M1t, class M2t, class M3t> + alias(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3) + : Option(Optional | Hidden), AliasFor(0) { + apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); + done(); + } +}; + +// aliasfor - Modifier to set the option an alias aliases. +struct aliasopt { + Option &Opt; + explicit aliasopt(Option &O) : Opt(O) {} + void apply(alias &A) const { A.setAliasFor(Opt); } +}; + +// extrahelp - provide additional help at the end of the normal help +// output. All occurrences of cl::extrahelp will be accumulated and +// printed to std::cerr at the end of the regular help, just before +// exit is called. +struct extrahelp { + const char * morehelp; + explicit extrahelp(const char* help); +}; + +void PrintVersionMessage(); +// This function just prints the help message, exactly the same way as if the +// --help option had been given on the command line. +// NOTE: THIS FUNCTION TERMINATES THE PROGRAM! +void PrintHelpMessage(); + +} // End namespace cl + +} // End namespace llvm + +#endif diff --git a/include/llvm/Support/Compiler.h b/include/llvm/Support/Compiler.h new file mode 100644 index 0000000..90292df --- /dev/null +++ b/include/llvm/Support/Compiler.h @@ -0,0 +1,59 @@ +//===-- llvm/Support/Compiler.h - Compiler abstraction support --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines several macros, based on the current compiler. 
+// This allows use of compiler-specific features in a way that remains portable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMPILER_H
+#define LLVM_SUPPORT_COMPILER_H
+
+// The VISIBILITY_HIDDEN macro, used for marking classes with the GCC-specific
+// visibility("hidden") attribute.
+#if (__GNUC__ >= 4) && !defined(__MINGW32__) && !defined(__CYGWIN__)
+#define VISIBILITY_HIDDEN __attribute__ ((visibility("hidden")))
+#else
+#define VISIBILITY_HIDDEN
+#endif
+
+#if (__GNUC__ >= 4)
+#define ATTRIBUTE_USED __attribute__((__used__))
+#else
+#define ATTRIBUTE_USED
+#endif
+
+#if (__GNUC__ >= 4)
+#define BUILTIN_EXPECT(EXPR, VALUE) __builtin_expect((EXPR), (VALUE))
+#else
+#define BUILTIN_EXPECT(EXPR, VALUE) (EXPR)
+#endif
+
+// C++ doesn't support 'extern template' of template specializations. GCC does,
+// but requires __extension__ before it. In the header, use this:
+// EXTERN_TEMPLATE_INSTANTIATION(class foo<bar>);
+// in the .cpp file, use this:
+// TEMPLATE_INSTANTIATION(class foo<bar>);
+#ifdef __GNUC__
+#define EXTERN_TEMPLATE_INSTANTIATION(X) __extension__ extern template X
+#define TEMPLATE_INSTANTIATION(X) template X
+#else
+#define EXTERN_TEMPLATE_INSTANTIATION(X)
+#define TEMPLATE_INSTANTIATION(X)
+#endif
+
+// DISABLE_INLINE - On compilers where we have a directive to do so, mark a
+// method "not for inlining".
+#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+#define DISABLE_INLINE __attribute__((noinline))
+#else
+#define DISABLE_INLINE
+#endif
+
+#endif
diff --git a/include/llvm/Support/ConstantFolder.h b/include/llvm/Support/ConstantFolder.h
new file mode 100644
index 0000000..ca8bcae
--- /dev/null
+++ b/include/llvm/Support/ConstantFolder.h
@@ -0,0 +1,189 @@
+//===-- llvm/Support/ConstantFolder.h - Constant folding helper -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ConstantFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for creating constants
+// with minimal folding. For general constant creation and folding,
+// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CONSTANTFOLDER_H
+#define LLVM_SUPPORT_CONSTANTFOLDER_H
+
+#include "llvm/Constants.h"
+
+namespace llvm {
+
+/// ConstantFolder - Create constants with minimum, target independent, folding.
+class ConstantFolder { +public: + + //===--------------------------------------------------------------------===// + // Binary Operators + //===--------------------------------------------------------------------===// + + Constant *CreateAdd(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getAdd(LHS, RHS); + } + Constant *CreateSub(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getSub(LHS, RHS); + } + Constant *CreateMul(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getMul(LHS, RHS); + } + Constant *CreateUDiv(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getUDiv(LHS, RHS); + } + Constant *CreateSDiv(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getSDiv(LHS, RHS); + } + Constant *CreateFDiv(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getFDiv(LHS, RHS); + } + Constant *CreateURem(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getURem(LHS, RHS); + } + Constant *CreateSRem(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getSRem(LHS, RHS); + } + Constant *CreateFRem(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getFRem(LHS, RHS); + } + Constant *CreateShl(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getShl(LHS, RHS); + } + Constant *CreateLShr(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getLShr(LHS, RHS); + } + Constant *CreateAShr(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getAShr(LHS, RHS); + } + Constant *CreateAnd(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getAnd(LHS, RHS); + } + Constant *CreateOr(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getOr(LHS, RHS); + } + Constant *CreateXor(Constant *LHS, Constant *RHS) const { + return ConstantExpr::getXor(LHS, RHS); + } + + Constant *CreateBinOp(Instruction::BinaryOps Opc, + Constant *LHS, Constant *RHS) const { + return ConstantExpr::get(Opc, LHS, RHS); + } + + //===--------------------------------------------------------------------===// + // Unary Operators + //===--------------------------------------------------------------------===// + + Constant *CreateNeg(Constant *C) const { + return ConstantExpr::getNeg(C); + } + Constant *CreateNot(Constant *C) const { + return ConstantExpr::getNot(C); + } + + //===--------------------------------------------------------------------===// + // Memory Instructions + //===--------------------------------------------------------------------===// + + Constant *CreateGetElementPtr(Constant *C, Constant* const *IdxList, + unsigned NumIdx) const { + return ConstantExpr::getGetElementPtr(C, IdxList, NumIdx); + } + Constant *CreateGetElementPtr(Constant *C, Value* const *IdxList, + unsigned NumIdx) const { + return ConstantExpr::getGetElementPtr(C, IdxList, NumIdx); + } + + //===--------------------------------------------------------------------===// + // Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + Constant *CreateCast(Instruction::CastOps Op, Constant *C, + const Type *DestTy) const { + return ConstantExpr::getCast(Op, C, DestTy); + } + Constant *CreateIntCast(Constant *C, const Type *DestTy, + bool isSigned) const { + return ConstantExpr::getIntegerCast(C, DestTy, isSigned); + } + + Constant *CreateBitCast(Constant *C, const Type *DestTy) const { + return CreateCast(Instruction::BitCast, C, DestTy); + } + Constant *CreateIntToPtr(Constant *C, const Type *DestTy) const { + return CreateCast(Instruction::IntToPtr, C, DestTy); + } 
+ Constant *CreatePtrToInt(Constant *C, const Type *DestTy) const { + return CreateCast(Instruction::PtrToInt, C, DestTy); + } + Constant *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const { + return ConstantExpr::getTruncOrBitCast(C, DestTy); + } + + //===--------------------------------------------------------------------===// + // Compare Instructions + //===--------------------------------------------------------------------===// + + Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return ConstantExpr::getCompare(P, LHS, RHS); + } + Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return ConstantExpr::getCompare(P, LHS, RHS); + } + Constant *CreateVICmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return ConstantExpr::getCompare(P, LHS, RHS); + } + Constant *CreateVFCmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return ConstantExpr::getCompare(P, LHS, RHS); + } + + //===--------------------------------------------------------------------===// + // Other Instructions + //===--------------------------------------------------------------------===// + + Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const { + return ConstantExpr::getSelect(C, True, False); + } + + Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const { + return ConstantExpr::getExtractElement(Vec, Idx); + } + + Constant *CreateInsertElement(Constant *Vec, Constant *NewElt, + Constant *Idx) const { + return ConstantExpr::getInsertElement(Vec, NewElt, Idx); + } + + Constant *CreateShuffleVector(Constant *V1, Constant *V2, + Constant *Mask) const { + return ConstantExpr::getShuffleVector(V1, V2, Mask); + } + + Constant *CreateExtractValue(Constant *Agg, const unsigned *IdxList, + unsigned NumIdx) const { + return ConstantExpr::getExtractValue(Agg, IdxList, NumIdx); + } + + Constant *CreateInsertValue(Constant *Agg, Constant *Val, + const unsigned *IdxList, unsigned NumIdx) const { + return ConstantExpr::getInsertValue(Agg, Val, IdxList, NumIdx); + } +}; + +} + +#endif diff --git a/include/llvm/Support/ConstantRange.h b/include/llvm/Support/ConstantRange.h new file mode 100644 index 0000000..098fab5 --- /dev/null +++ b/include/llvm/Support/ConstantRange.h @@ -0,0 +1,195 @@ +//===-- llvm/Support/ConstantRange.h - Represent a range --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Represent a range of possible values that may occur when the program is run +// for an integral value. This keeps track of a lower and upper bound for the +// constant, which MAY wrap around the end of the numeric range. To do this, it +// keeps track of a [lower, upper) bound, which specifies an interval just like +// STL iterators. When used with boolean values, the following are important +// ranges: : +// +// [F, F) = {} = Empty set +// [T, F) = {T} +// [F, T) = {F} +// [T, T) = {F, T} = Full set +// +// The other integral ranges use min/max values for special range values. For +// example, for 8-bit types, it uses: +// [0, 0) = {} = Empty set +// [255, 255) = {0..255} = Full Set +// +// Note that ConstantRange always keeps unsigned values. 
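To make the wrapped-interval semantics described in the comment above concrete, a small illustrative sketch against the interface declared below:

#include "llvm/ADT/APInt.h"
#include "llvm/Support/ConstantRange.h"
#include <cassert>
using namespace llvm;

void constantRangeExample() {
  // An 8-bit range [100, 8) wraps past 255, so it holds 100..255 and 0..7.
  ConstantRange CR(APInt(8, 100), APInt(8, 8));

  assert(CR.isWrappedSet());
  assert(CR.contains(APInt(8, 200)));   // in the high part
  assert(CR.contains(APInt(8, 3)));     // in the low part
  assert(!CR.contains(APInt(8, 50)));   // in the gap [8, 100)

  // Because the set wraps, its unsigned extremes span the whole type.
  assert(CR.getUnsignedMax() == APInt(8, 255));
  assert(CR.getUnsignedMin() == APInt(8, 0));
}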
+//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_CONSTANT_RANGE_H +#define LLVM_SUPPORT_CONSTANT_RANGE_H + +#include "llvm/ADT/APInt.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class ConstantRange { + APInt Lower, Upper; + static ConstantRange intersect1Wrapped(const ConstantRange &LHS, + const ConstantRange &RHS); + public: + /// Initialize a full (the default) or empty set for the specified bit width. + /// + explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true); + + /// Initialize a range to hold the single specified value. + /// + ConstantRange(const APInt &Value); + + /// @brief Initialize a range of values explicitly. This will assert out if + /// Lower==Upper and Lower != Min or Max value for its type. It will also + /// assert out if the two APInt's are not the same bit width. + ConstantRange(const APInt& Lower, const APInt& Upper); + + /// getLower - Return the lower value for this range... + /// + const APInt &getLower() const { return Lower; } + + /// getUpper - Return the upper value for this range... + /// + const APInt &getUpper() const { return Upper; } + + /// getBitWidth - get the bit width of this ConstantRange + /// + uint32_t getBitWidth() const { return Lower.getBitWidth(); } + + /// isFullSet - Return true if this set contains all of the elements possible + /// for this data-type + /// + bool isFullSet() const; + + /// isEmptySet - Return true if this set contains no members. + /// + bool isEmptySet() const; + + /// isWrappedSet - Return true if this set wraps around the top of the range, + /// for example: [100, 8) + /// + bool isWrappedSet() const; + + /// contains - Return true if the specified value is in the set. + /// + bool contains(const APInt &Val) const; + + /// getSingleElement - If this set contains a single element, return it, + /// otherwise return null. + /// + const APInt *getSingleElement() const { + if (Upper == Lower + 1) + return &Lower; + return 0; + } + + /// isSingleElement - Return true if this set contains exactly one member. + /// + bool isSingleElement() const { return getSingleElement() != 0; } + + /// getSetSize - Return the number of elements in this set. + /// + APInt getSetSize() const; + + /// getUnsignedMax - Return the largest unsigned value contained in the + /// ConstantRange. + /// + APInt getUnsignedMax() const; + + /// getUnsignedMin - Return the smallest unsigned value contained in the + /// ConstantRange. + /// + APInt getUnsignedMin() const; + + /// getSignedMax - Return the largest signed value contained in the + /// ConstantRange. + /// + APInt getSignedMax() const; + + /// getSignedMin - Return the smallest signed value contained in the + /// ConstantRange. + /// + APInt getSignedMin() const; + + /// operator== - Return true if this range is equal to another range. + /// + bool operator==(const ConstantRange &CR) const { + return Lower == CR.Lower && Upper == CR.Upper; + } + bool operator!=(const ConstantRange &CR) const { + return !operator==(CR); + } + + /// subtract - Subtract the specified constant from the endpoints of this + /// constant range. + ConstantRange subtract(const APInt &CI) const; + + /// intersectWith - Return the range that results from the intersection of + /// this range with another range. The resultant range is pruned as much as + /// possible, but there may be cases where elements are included that are in + /// one of the sets but not the other. 
For example: [100, 8) intersect [3, + /// 120) yields [3, 120) + /// + ConstantRange intersectWith(const ConstantRange &CR) const; + + /// maximalIntersectWith - Return the range that results from the intersection + /// of this range with another range. The resultant range is guaranteed to + /// include all elements contained in both input ranges, and to have the + /// smallest possible set size that does so. Because there may be two + /// intersections with the same set size, A.maximalIntersectWith(B) might not + /// be equal to B.maximalIntersectWith(A). + /// + ConstantRange maximalIntersectWith(const ConstantRange &CR) const; + + /// unionWith - Return the range that results from the union of this range + /// with another range. The resultant range is guaranteed to include the + /// elements of both sets, but may contain more. For example, [3, 9) union + /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included + /// in either set before. + /// + ConstantRange unionWith(const ConstantRange &CR) const; + + /// zeroExtend - Return a new range in the specified integer type, which must + /// be strictly larger than the current type. The returned range will + /// correspond to the possible range of values if the source range had been + /// zero extended to BitWidth. + ConstantRange zeroExtend(uint32_t BitWidth) const; + + /// signExtend - Return a new range in the specified integer type, which must + /// be strictly larger than the current type. The returned range will + /// correspond to the possible range of values if the source range had been + /// sign extended to BitWidth. + ConstantRange signExtend(uint32_t BitWidth) const; + + /// truncate - Return a new range in the specified integer type, which must be + /// strictly smaller than the current type. The returned range will + /// correspond to the possible range of values if the source range had been + /// truncated to the specified type. + ConstantRange truncate(uint32_t BitWidth) const; + + /// print - Print out the bounds to a stream... + /// + void print(raw_ostream &OS) const; + + /// dump - Allow printing from a debugger easily... + /// + void dump() const; +}; + +inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) { + CR.print(OS); + return OS; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/DOTGraphTraits.h b/include/llvm/Support/DOTGraphTraits.h new file mode 100644 index 0000000..7a61b2b --- /dev/null +++ b/include/llvm/Support/DOTGraphTraits.h @@ -0,0 +1,141 @@ +//===-- llvm/Support/DotGraphTraits.h - Customize .dot output ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a template class that can be used to customize dot output +// graphs generated by the GraphWriter.h file. The default implementation of +// this file will produce a simple, but not very polished graph. By +// specializing this template, lots of customization opportunities are possible. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_DOTGRAPHTRAITS_H +#define LLVM_SUPPORT_DOTGRAPHTRAITS_H + +#include <string> + +namespace llvm { + +/// DefaultDOTGraphTraits - This class provides the default implementations of +/// all of the DOTGraphTraits methods. 
If a specialization does not need to +/// override all methods here it should inherit so that it can get the default +/// implementations. +/// +struct DefaultDOTGraphTraits { + /// getGraphName - Return the label for the graph as a whole. Printed at the + /// top of the graph. + /// + template<typename GraphType> + static std::string getGraphName(const GraphType& Graph) { return ""; } + + /// getGraphProperties - Return any custom properties that should be included + /// in the top level graph structure for dot. + /// + template<typename GraphType> + static std::string getGraphProperties(const GraphType& Graph) { + return ""; + } + + /// renderGraphFromBottomUp - If this function returns true, the graph is + /// emitted bottom-up instead of top-down. This requires graphviz 2.0 to work + /// though. + static bool renderGraphFromBottomUp() { + return false; + } + + /// getNodeLabel - Given a node and a pointer to the top level graph, return + /// the label to print in the node. + template<typename GraphType> + static std::string getNodeLabel(const void *Node, const GraphType& Graph) { + return ""; + } + + /// hasNodeAddressLabel - If this method returns true, the address of the node + /// is added to the label of the node. + template<typename GraphType> + static bool hasNodeAddressLabel(const void *Node, const GraphType& Graph) { + return false; + } + + /// If you want to specify custom node attributes, this is the place to do so + /// + template<typename GraphType> + static std::string getNodeAttributes(const void *Node, + const GraphType& Graph) { + return ""; + } + + /// If you want to override the dot attributes printed for a particular edge, + /// override this method. + template<typename EdgeIter> + static std::string getEdgeAttributes(const void *Node, EdgeIter EI) { + return ""; + } + + /// getEdgeSourceLabel - If you want to label the edge source itself, + /// implement this method. + template<typename EdgeIter> + static std::string getEdgeSourceLabel(const void *Node, EdgeIter I) { + return ""; + } + + /// edgeTargetsEdgeSource - This method returns true if this outgoing edge + /// should actually target another edge source, not a node. If this method is + /// implemented, getEdgeTarget should be implemented. + template<typename EdgeIter> + static bool edgeTargetsEdgeSource(const void *Node, EdgeIter I) { + return false; + } + + /// getEdgeTarget - If edgeTargetsEdgeSource returns true, this method is + /// called to determine which outgoing edge of Node is the target of this + /// edge. + template<typename EdgeIter> + static EdgeIter getEdgeTarget(const void *Node, EdgeIter I) { + return I; + } + + /// hasEdgeDestLabels - If this function returns true, the graph is able + /// to provide labels for edge destinations. + static bool hasEdgeDestLabels() { + return false; + } + + /// numEdgeDestLabels - If hasEdgeDestLabels, this function returns the + /// number of incoming edge labels the given node has. + static unsigned numEdgeDestLabels(const void *Node) { + return 0; + } + + /// getEdgeDestLabel - If hasEdgeDestLabels, this function returns the + /// incoming edge label with the given index in the given node. + static std::string getEdgeDestLabel(const void *Node, unsigned i) { + return ""; + } + + /// addCustomGraphFeatures - If a graph is made up of more than just + /// straight-forward nodes and edges, this is the place to put all of the + /// custom stuff necessary. The GraphWriter object, instantiated with your + /// GraphType is passed in as an argument. 
You may call arbitrary methods on + /// it to add things to the output graph. + /// + template<typename GraphType, typename GraphWriter> + static void addCustomGraphFeatures(const GraphType& Graph, GraphWriter &GW) {} +}; + + +/// DOTGraphTraits - Template class that can be specialized to customize how +/// graphs are converted to 'dot' graphs. When specializing, you may inherit +/// from DefaultDOTGraphTraits if you don't need to override everything. +/// +template <typename Ty> +struct DOTGraphTraits : public DefaultDOTGraphTraits {}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/DataFlow.h b/include/llvm/Support/DataFlow.h new file mode 100644 index 0000000..8f79ead --- /dev/null +++ b/include/llvm/Support/DataFlow.h @@ -0,0 +1,103 @@ +//===-- llvm/Support/DataFlow.h - dataflow as graphs ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines specializations of GraphTraits that allows Use-Def and +// Def-Use relations to be treated as proper graphs for generic algorithms. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_DATAFLOW_H +#define LLVM_SUPPORT_DATAFLOW_H + +#include "llvm/User.h" +#include "llvm/ADT/GraphTraits.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Provide specializations of GraphTraits to be able to treat def-use/use-def +// chains as graphs + +template <> struct GraphTraits<const Value*> { + typedef const Value NodeType; + typedef Value::use_const_iterator ChildIteratorType; + + static NodeType *getEntryNode(const Value *G) { + return G; + } + + static inline ChildIteratorType child_begin(NodeType *N) { + return N->use_begin(); + } + + static inline ChildIteratorType child_end(NodeType *N) { + return N->use_end(); + } +}; + +template <> struct GraphTraits<Value*> { + typedef Value NodeType; + typedef Value::use_iterator ChildIteratorType; + + static NodeType *getEntryNode(Value *G) { + return G; + } + + static inline ChildIteratorType child_begin(NodeType *N) { + return N->use_begin(); + } + + static inline ChildIteratorType child_end(NodeType *N) { + return N->use_end(); + } +}; + +template <> struct GraphTraits<Inverse<const User*> > { + typedef const Value NodeType; + typedef User::const_op_iterator ChildIteratorType; + + static NodeType *getEntryNode(Inverse<const User*> G) { + return G.Graph; + } + + static inline ChildIteratorType child_begin(NodeType *N) { + if (const User *U = dyn_cast<User>(N)) + return U->op_begin(); + return NULL; + } + + static inline ChildIteratorType child_end(NodeType *N) { + if(const User *U = dyn_cast<User>(N)) + return U->op_end(); + return NULL; + } +}; + +template <> struct GraphTraits<Inverse<User*> > { + typedef Value NodeType; + typedef User::op_iterator ChildIteratorType; + + static NodeType *getEntryNode(Inverse<User*> G) { + return G.Graph; + } + + static inline ChildIteratorType child_begin(NodeType *N) { + if (User *U = dyn_cast<User>(N)) + return U->op_begin(); + return NULL; + } + + static inline ChildIteratorType child_end(NodeType *N) { + if (User *U = dyn_cast<User>(N)) + return U->op_end(); + return NULL; + } +}; + +} +#endif diff --git a/include/llvm/Support/DataTypes.h.cmake b/include/llvm/Support/DataTypes.h.cmake new file mode 
100644 index 0000000..4d6fcc8 --- /dev/null +++ b/include/llvm/Support/DataTypes.h.cmake @@ -0,0 +1,147 @@ +//===-- include/Support/DataTypes.h - Define fixed size types ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains definitions to figure out the size of _HOST_ data types. +// This file is important because different host OS's define different macros, +// which makes portability tough. This file exports the following definitions: +// +// [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types +// [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. +// +// No library is required when using these functinons. +// +//===----------------------------------------------------------------------===// + +#ifndef SUPPORT_DATATYPES_H +#define SUPPORT_DATATYPES_H + +#cmakedefine HAVE_SYS_TYPES_H ${HAVE_SYS_TYPES_H} +#cmakedefine HAVE_INTTYPES_H ${HAVE_INTTYPES_H} +#cmakedefine HAVE_STDINT_H ${HAVE_STDINT_H} +#undef HAVE_UINT64_T +#undef HAVE_U_INT64_T + +// FIXME: UGLY HACK (Added by Kevin) +#define HAVE_UINT64_T 1 + +#ifndef _MSC_VER + +// Note that this header's correct operation depends on __STDC_LIMIT_MACROS +// being defined. We would define it here, but in order to prevent Bad Things +// happening when system headers or C++ STL headers include stdint.h before +// we define it here, we define it on the g++ command line (in Makefile.rules). +#if !defined(__STDC_LIMIT_MACROS) +# error "Must #define __STDC_LIMIT_MACROS before #including Support/DataTypes.h" +#endif + +#if !defined(__STDC_CONSTANT_MACROS) +# error "Must #define __STDC_CONSTANT_MACROS before " \ + "#including Support/DataTypes.h" +#endif + +// Note that <inttypes.h> includes <stdint.h>, if this is a C99 system. +#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif + +#ifdef HAVE_INTTYPES_H +#include <inttypes.h> +#endif + +#ifdef HAVE_STDINT_H +#include <stdint.h> +#endif + +#ifdef __cplusplus +#include <cmath> +#else +#include <math.h> +#endif + +#ifdef _AIX +#include "llvm/Support/AIXDataTypesFix.h" +#endif + +// Handle incorrect definition of uint64_t as u_int64_t +#ifndef HAVE_UINT64_T +#ifdef HAVE_U_INT64_T +typedef u_int64_t uint64_t; +#else +# error "Don't have a definition for uint64_t on this platform" +#endif +#endif + +#ifdef _OpenBSD_ +#define INT8_MAX 127 +#define INT8_MIN -128 +#define UINT8_MAX 255 +#define INT16_MAX 32767 +#define INT16_MIN -32768 +#define UINT16_MAX 65535 +#define INT32_MAX 2147483647 +#define INT32_MIN -2147483648 +#define UINT32_MAX 4294967295U +#endif + +#else /* _MSC_VER */ +// Visual C++ doesn't provide standard integer headers, but it does provide +// built-in data types. 
+#include <stdlib.h> +#include <stddef.h> +#include <sys/types.h> +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +typedef signed int int32_t; +typedef unsigned int uint32_t; +typedef short int16_t; +typedef unsigned short uint16_t; +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef signed int ssize_t; +#define INT8_MAX 127 +#define INT8_MIN -128 +#define UINT8_MAX 255 +#define INT16_MAX 32767 +#define INT16_MIN -32768 +#define UINT16_MAX 65535 +#define INT32_MAX 2147483647 +#define INT32_MIN -2147483648 +#define UINT32_MAX 4294967295U +#define INT8_C(C) C +#define UINT8_C(C) C +#define INT16_C(C) C +#define UINT16_C(C) C +#define INT32_C(C) C +#define UINT32_C(C) C ## U +#define INT64_C(C) ((int64_t) C ## LL) +#define UINT64_C(C) ((uint64_t) C ## ULL) +#endif /* _MSC_VER */ + +/* Set defaults for constants which we cannot find. */ +#if !defined(INT64_MAX) +# define INT64_MAX 9223372036854775807LL +#endif +#if !defined(INT64_MIN) +# define INT64_MIN ((-INT64_MAX)-1) +#endif +#if !defined(UINT64_MAX) +# define UINT64_MAX 0xffffffffffffffffULL +#endif + +#if __GNUC__ > 3 +#define END_WITH_NULL __attribute__((sentinel)) +#else +#define END_WITH_NULL +#endif + +#ifndef HUGE_VALF +#define HUGE_VALF (float)HUGE_VAL +#endif + +#endif /* SUPPORT_DATATYPES_H */ diff --git a/include/llvm/Support/DataTypes.h.in b/include/llvm/Support/DataTypes.h.in new file mode 100644 index 0000000..72063f7 --- /dev/null +++ b/include/llvm/Support/DataTypes.h.in @@ -0,0 +1,144 @@ +//===-- include/Support/DataTypes.h - Define fixed size types ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains definitions to figure out the size of _HOST_ data types. +// This file is important because different host OS's define different macros, +// which makes portability tough. This file exports the following definitions: +// +// [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types +// [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. +// +// No library is required when using these functinons. +// +//===----------------------------------------------------------------------===// + +#ifndef SUPPORT_DATATYPES_H +#define SUPPORT_DATATYPES_H + +#undef HAVE_SYS_TYPES_H +#undef HAVE_INTTYPES_H +#undef HAVE_STDINT_H +#undef HAVE_UINT64_T +#undef HAVE_U_INT64_T + +#ifndef _MSC_VER + +// Note that this header's correct operation depends on __STDC_LIMIT_MACROS +// being defined. We would define it here, but in order to prevent Bad Things +// happening when system headers or C++ STL headers include stdint.h before +// we define it here, we define it on the g++ command line (in Makefile.rules). +#if !defined(__STDC_LIMIT_MACROS) +# error "Must #define __STDC_LIMIT_MACROS before #including Support/DataTypes.h" +#endif + +#if !defined(__STDC_CONSTANT_MACROS) +# error "Must #define __STDC_CONSTANT_MACROS before " \ + "#including Support/DataTypes.h" +#endif + +// Note that <inttypes.h> includes <stdint.h>, if this is a C99 system. 
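// Editorial sketch (not part of the original header): the two #error checks
// above mean that any translation unit reaching this header must have both
// macros defined before the first include of <stdint.h>/<inttypes.h>. LLVM's
// own build does this on the compiler command line (see Makefile.rules); a
// standalone consumer of the headers can do the same, e.g.:
//
//   g++ -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -c MyTool.cpp
//
// after which uint64_t, UINT64_MAX and the INT64_C()/UINT64_C() constant
// macros provided here are usable portably.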
+#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif + +#ifdef HAVE_INTTYPES_H +#include <inttypes.h> +#endif + +#ifdef HAVE_STDINT_H +#include <stdint.h> +#endif + +#ifdef __cplusplus +#include <cmath> +#else +#include <math.h> +#endif + +#ifdef _AIX +#include "llvm/Support/AIXDataTypesFix.h" +#endif + +// Handle incorrect definition of uint64_t as u_int64_t +#ifndef HAVE_UINT64_T +#ifdef HAVE_U_INT64_T +typedef u_int64_t uint64_t; +#else +# error "Don't have a definition for uint64_t on this platform" +#endif +#endif + +#ifdef _OpenBSD_ +#define INT8_MAX 127 +#define INT8_MIN -128 +#define UINT8_MAX 255 +#define INT16_MAX 32767 +#define INT16_MIN -32768 +#define UINT16_MAX 65535 +#define INT32_MAX 2147483647 +#define INT32_MIN -2147483648 +#define UINT32_MAX 4294967295U +#endif + +#else /* _MSC_VER */ +// Visual C++ doesn't provide standard integer headers, but it does provide +// built-in data types. +#include <stdlib.h> +#include <stddef.h> +#include <sys/types.h> +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +typedef signed int int32_t; +typedef unsigned int uint32_t; +typedef short int16_t; +typedef unsigned short uint16_t; +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef signed int ssize_t; +#define INT8_MAX 127 +#define INT8_MIN -128 +#define UINT8_MAX 255 +#define INT16_MAX 32767 +#define INT16_MIN -32768 +#define UINT16_MAX 65535 +#define INT32_MAX 2147483647 +#define INT32_MIN -2147483648 +#define UINT32_MAX 4294967295U +#define INT8_C(C) C +#define UINT8_C(C) C +#define INT16_C(C) C +#define UINT16_C(C) C +#define INT32_C(C) C +#define UINT32_C(C) C ## U +#define INT64_C(C) ((int64_t) C ## LL) +#define UINT64_C(C) ((uint64_t) C ## ULL) +#endif /* _MSC_VER */ + +/* Set defaults for constants which we cannot find. */ +#if !defined(INT64_MAX) +# define INT64_MAX 9223372036854775807LL +#endif +#if !defined(INT64_MIN) +# define INT64_MIN ((-INT64_MAX)-1) +#endif +#if !defined(UINT64_MAX) +# define UINT64_MAX 0xffffffffffffffffULL +#endif + +#if __GNUC__ > 3 +#define END_WITH_NULL __attribute__((sentinel)) +#else +#define END_WITH_NULL +#endif + +#ifndef HUGE_VALF +#define HUGE_VALF (float)HUGE_VAL +#endif + +#endif /* SUPPORT_DATATYPES_H */ diff --git a/include/llvm/Support/Debug.h b/include/llvm/Support/Debug.h new file mode 100644 index 0000000..52d0d3f --- /dev/null +++ b/include/llvm/Support/Debug.h @@ -0,0 +1,78 @@ +//===- llvm/Support/Debug.h - Easy way to add debug output ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a handy way of adding debugging information to your +// code, without it being enabled all of the time, and without having to add +// command line options to enable it. +// +// In particular, just wrap your code with the DEBUG() macro, and it will be +// enabled automatically if you specify '-debug' on the command-line. +// Alternatively, you can also use the SET_DEBUG_TYPE("foo") macro to specify +// that your debug code belongs to class "foo". Then, on the command line, you +// can specify '-debug-only=foo' to enable JUST the debug information for the +// foo class. +// +// When compiling in release mode, the -debug-* options and all code in DEBUG() +// statements disappears, so it does not effect the runtime of the code. 
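// A minimal illustrative sketch (the pass name "mypass" is hypothetical):
//
//   #define DEBUG_TYPE "mypass"        // define before including Debug.h
//   #include "llvm/Support/Debug.h"
//   ...
//   DEBUG(cerr << "visiting function\n");
//
// With assertions enabled the message is printed only when the tool is run
// with -debug (or with -debug-only=mypass); in NDEBUG builds the statement
// compiles away entirely.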
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_DEBUG_H +#define LLVM_SUPPORT_DEBUG_H + +#include "llvm/Support/Streams.h" + +namespace llvm { + +// DebugFlag - This boolean is set to true if the '-debug' command line option +// is specified. This should probably not be referenced directly, instead, use +// the DEBUG macro below. +// +extern bool DebugFlag; + +// isCurrentDebugType - Return true if the specified string is the debug type +// specified on the command line, or if none was specified on the command line +// with the -debug-only=X option. +// +bool isCurrentDebugType(const char *Type); + +// DEBUG macro - This macro should be used by passes to emit debug information. +// In the '-debug' option is specified on the commandline, and if this is a +// debug build, then the code specified as the option to the macro will be +// executed. Otherwise it will not be. Example: +// +// DEBUG(cerr << "Bitset contains: " << Bitset << "\n"); +// + +#ifndef DEBUG_TYPE +#define DEBUG_TYPE "" +#endif + +#ifdef NDEBUG +#define DEBUG(X) +#else +#define DEBUG(X) \ + do { if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) { X; } } while (0) +#endif + +/// getErrorOutputStream - Returns the error output stream (std::cerr). This +/// places the std::c* I/O streams into one .cpp file and relieves the whole +/// program from having to have hundreds of static c'tor/d'tors for them. +/// +OStream &getErrorOutputStream(const char *DebugType); + +#ifdef NDEBUG +#define DOUT llvm::OStream(0) +#else +#define DOUT llvm::getErrorOutputStream(DEBUG_TYPE) +#endif + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/Dwarf.h b/include/llvm/Support/Dwarf.h new file mode 100644 index 0000000..55838b8 --- /dev/null +++ b/include/llvm/Support/Dwarf.h @@ -0,0 +1,587 @@ +//===-- llvm/Support/Dwarf.h ---Dwarf Constants------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains constants used for implementing Dwarf debug support. For +// Details on the Dwarf 3 specfication see DWARF Debugging Information Format +// V.3 reference manual http://dwarf.freestandards.org , +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_DWARF_H +#define LLVM_SUPPORT_DWARF_H + +namespace llvm { + +//===----------------------------------------------------------------------===// +// Debug info constants. + +enum { + LLVMDebugVersion = (7 << 16), // Current version of debug information. + LLVMDebugVersion6 = (6 << 16), // Constant for version 6. + LLVMDebugVersion5 = (5 << 16), // Constant for version 5. + LLVMDebugVersion4 = (4 << 16), // Constant for version 4. + LLVMDebugVersionMask = 0xffff0000 // Mask for version number. +}; + +namespace dwarf { + +//===----------------------------------------------------------------------===// +// Dwarf constants as gleaned from the DWARF Debugging Information Format V.3 +// reference manual http://dwarf.freestandards.org . +// + +// Do not mix the following two enumerations sets. DW_TAG_invalid changes the +// enumeration base type. + +enum llvm_dwarf_constants { + // llvm mock tags + DW_TAG_invalid = ~0U, // Tag for invalid results. + + DW_TAG_anchor = 0, // Tag for descriptor anchors. 
+ DW_TAG_auto_variable = 0x100, // Tag for local (auto) variables. + DW_TAG_arg_variable = 0x101, // Tag for argument variables. + DW_TAG_return_variable = 0x102, // Tag for return variables. + + DW_TAG_vector_type = 0x103, // Tag for vector types. + + DW_TAG_user_base = 0x1000, // Recommended base for user tags. + + DW_CIE_VERSION = 1, // Common frame information version. + DW_CIE_ID = 0xffffffff // Common frame information mark. +}; + +enum dwarf_constants { + DWARF_VERSION = 2, + + // Tags + DW_TAG_array_type = 0x01, + DW_TAG_class_type = 0x02, + DW_TAG_entry_point = 0x03, + DW_TAG_enumeration_type = 0x04, + DW_TAG_formal_parameter = 0x05, + DW_TAG_imported_declaration = 0x08, + DW_TAG_label = 0x0a, + DW_TAG_lexical_block = 0x0b, + DW_TAG_member = 0x0d, + DW_TAG_pointer_type = 0x0f, + DW_TAG_reference_type = 0x10, + DW_TAG_compile_unit = 0x11, + DW_TAG_string_type = 0x12, + DW_TAG_structure_type = 0x13, + DW_TAG_subroutine_type = 0x15, + DW_TAG_typedef = 0x16, + DW_TAG_union_type = 0x17, + DW_TAG_unspecified_parameters = 0x18, + DW_TAG_variant = 0x19, + DW_TAG_common_block = 0x1a, + DW_TAG_common_inclusion = 0x1b, + DW_TAG_inheritance = 0x1c, + DW_TAG_inlined_subroutine = 0x1d, + DW_TAG_module = 0x1e, + DW_TAG_ptr_to_member_type = 0x1f, + DW_TAG_set_type = 0x20, + DW_TAG_subrange_type = 0x21, + DW_TAG_with_stmt = 0x22, + DW_TAG_access_declaration = 0x23, + DW_TAG_base_type = 0x24, + DW_TAG_catch_block = 0x25, + DW_TAG_const_type = 0x26, + DW_TAG_constant = 0x27, + DW_TAG_enumerator = 0x28, + DW_TAG_file_type = 0x29, + DW_TAG_friend = 0x2a, + DW_TAG_namelist = 0x2b, + DW_TAG_namelist_item = 0x2c, + DW_TAG_packed_type = 0x2d, + DW_TAG_subprogram = 0x2e, + DW_TAG_template_type_parameter = 0x2f, + DW_TAG_template_value_parameter = 0x30, + DW_TAG_thrown_type = 0x31, + DW_TAG_try_block = 0x32, + DW_TAG_variant_part = 0x33, + DW_TAG_variable = 0x34, + DW_TAG_volatile_type = 0x35, + DW_TAG_dwarf_procedure = 0x36, + DW_TAG_restrict_type = 0x37, + DW_TAG_interface_type = 0x38, + DW_TAG_namespace = 0x39, + DW_TAG_imported_module = 0x3a, + DW_TAG_unspecified_type = 0x3b, + DW_TAG_partial_unit = 0x3c, + DW_TAG_imported_unit = 0x3d, + DW_TAG_condition = 0x3f, + DW_TAG_shared_type = 0x40, + DW_TAG_lo_user = 0x4080, + DW_TAG_hi_user = 0xffff, + + // Children flag + DW_CHILDREN_no = 0x00, + DW_CHILDREN_yes = 0x01, + + // Attributes + DW_AT_sibling = 0x01, + DW_AT_location = 0x02, + DW_AT_name = 0x03, + DW_AT_ordering = 0x09, + DW_AT_byte_size = 0x0b, + DW_AT_bit_offset = 0x0c, + DW_AT_bit_size = 0x0d, + DW_AT_stmt_list = 0x10, + DW_AT_low_pc = 0x11, + DW_AT_high_pc = 0x12, + DW_AT_language = 0x13, + DW_AT_discr = 0x15, + DW_AT_discr_value = 0x16, + DW_AT_visibility = 0x17, + DW_AT_import = 0x18, + DW_AT_string_length = 0x19, + DW_AT_common_reference = 0x1a, + DW_AT_comp_dir = 0x1b, + DW_AT_const_value = 0x1c, + DW_AT_containing_type = 0x1d, + DW_AT_default_value = 0x1e, + DW_AT_inline = 0x20, + DW_AT_is_optional = 0x21, + DW_AT_lower_bound = 0x22, + DW_AT_producer = 0x25, + DW_AT_prototyped = 0x27, + DW_AT_return_addr = 0x2a, + DW_AT_start_scope = 0x2c, + DW_AT_bit_stride = 0x2e, + DW_AT_upper_bound = 0x2f, + DW_AT_abstract_origin = 0x31, + DW_AT_accessibility = 0x32, + DW_AT_address_class = 0x33, + DW_AT_artificial = 0x34, + DW_AT_base_types = 0x35, + DW_AT_calling_convention = 0x36, + DW_AT_count = 0x37, + DW_AT_data_member_location = 0x38, + DW_AT_decl_column = 0x39, + DW_AT_decl_file = 0x3a, + DW_AT_decl_line = 0x3b, + DW_AT_declaration = 0x3c, + DW_AT_discr_list = 0x3d, + DW_AT_encoding = 0x3e, + 
DW_AT_external = 0x3f, + DW_AT_frame_base = 0x40, + DW_AT_friend = 0x41, + DW_AT_identifier_case = 0x42, + DW_AT_macro_info = 0x43, + DW_AT_namelist_item = 0x44, + DW_AT_priority = 0x45, + DW_AT_segment = 0x46, + DW_AT_specification = 0x47, + DW_AT_static_link = 0x48, + DW_AT_type = 0x49, + DW_AT_use_location = 0x4a, + DW_AT_variable_parameter = 0x4b, + DW_AT_virtuality = 0x4c, + DW_AT_vtable_elem_location = 0x4d, + DW_AT_allocated = 0x4e, + DW_AT_associated = 0x4f, + DW_AT_data_location = 0x50, + DW_AT_byte_stride = 0x51, + DW_AT_entry_pc = 0x52, + DW_AT_use_UTF8 = 0x53, + DW_AT_extension = 0x54, + DW_AT_ranges = 0x55, + DW_AT_trampoline = 0x56, + DW_AT_call_column = 0x57, + DW_AT_call_file = 0x58, + DW_AT_call_line = 0x59, + DW_AT_description = 0x5a, + DW_AT_binary_scale = 0x5b, + DW_AT_decimal_scale = 0x5c, + DW_AT_small = 0x5d, + DW_AT_decimal_sign = 0x5e, + DW_AT_digit_count = 0x5f, + DW_AT_picture_string = 0x60, + DW_AT_mutable = 0x61, + DW_AT_threads_scaled = 0x62, + DW_AT_explicit = 0x63, + DW_AT_object_pointer = 0x64, + DW_AT_endianity = 0x65, + DW_AT_elemental = 0x66, + DW_AT_pure = 0x67, + DW_AT_recursive = 0x68, + DW_AT_MIPS_linkage_name = 0x2007, + DW_AT_sf_names = 0x2101, + DW_AT_src_info = 0x2102, + DW_AT_mac_info = 0x2103, + DW_AT_src_coords = 0x2104, + DW_AT_body_begin = 0x2105, + DW_AT_body_end = 0x2106, + DW_AT_GNU_vector = 0x2107, + DW_AT_lo_user = 0x2000, + DW_AT_hi_user = 0x3fff, + + // Apple extensions. + DW_AT_APPLE_optimized = 0x3fe1, + DW_AT_APPLE_flags = 0x3fe2, + DW_AT_APPLE_isa = 0x3fe3, + DW_AT_APPLE_block = 0x3fe4, + DW_AT_APPLE_major_runtime_vers = 0x3fe5, + DW_AT_APPLE_runtime_class = 0x3fe6, + + // Attribute form encodings + DW_FORM_addr = 0x01, + DW_FORM_block2 = 0x03, + DW_FORM_block4 = 0x04, + DW_FORM_data2 = 0x05, + DW_FORM_data4 = 0x06, + DW_FORM_data8 = 0x07, + DW_FORM_string = 0x08, + DW_FORM_block = 0x09, + DW_FORM_block1 = 0x0a, + DW_FORM_data1 = 0x0b, + DW_FORM_flag = 0x0c, + DW_FORM_sdata = 0x0d, + DW_FORM_strp = 0x0e, + DW_FORM_udata = 0x0f, + DW_FORM_ref_addr = 0x10, + DW_FORM_ref1 = 0x11, + DW_FORM_ref2 = 0x12, + DW_FORM_ref4 = 0x13, + DW_FORM_ref8 = 0x14, + DW_FORM_ref_udata = 0x15, + DW_FORM_indirect = 0x16, + + // Operation encodings + DW_OP_addr = 0x03, + DW_OP_deref = 0x06, + DW_OP_const1u = 0x08, + DW_OP_const1s = 0x09, + DW_OP_const2u = 0x0a, + DW_OP_const2s = 0x0b, + DW_OP_const4u = 0x0c, + DW_OP_const4s = 0x0d, + DW_OP_const8u = 0x0e, + DW_OP_const8s = 0x0f, + DW_OP_constu = 0x10, + DW_OP_consts = 0x11, + DW_OP_dup = 0x12, + DW_OP_drop = 0x13, + DW_OP_over = 0x14, + DW_OP_pick = 0x15, + DW_OP_swap = 0x16, + DW_OP_rot = 0x17, + DW_OP_xderef = 0x18, + DW_OP_abs = 0x19, + DW_OP_and = 0x1a, + DW_OP_div = 0x1b, + DW_OP_minus = 0x1c, + DW_OP_mod = 0x1d, + DW_OP_mul = 0x1e, + DW_OP_neg = 0x1f, + DW_OP_not = 0x20, + DW_OP_or = 0x21, + DW_OP_plus = 0x22, + DW_OP_plus_uconst = 0x23, + DW_OP_shl = 0x24, + DW_OP_shr = 0x25, + DW_OP_shra = 0x26, + DW_OP_xor = 0x27, + DW_OP_skip = 0x2f, + DW_OP_bra = 0x28, + DW_OP_eq = 0x29, + DW_OP_ge = 0x2a, + DW_OP_gt = 0x2b, + DW_OP_le = 0x2c, + DW_OP_lt = 0x2d, + DW_OP_ne = 0x2e, + DW_OP_lit0 = 0x30, + DW_OP_lit1 = 0x31, + DW_OP_lit31 = 0x4f, + DW_OP_reg0 = 0x50, + DW_OP_reg1 = 0x51, + DW_OP_reg31 = 0x6f, + DW_OP_breg0 = 0x70, + DW_OP_breg1 = 0x71, + DW_OP_breg31 = 0x8f, + DW_OP_regx = 0x90, + DW_OP_fbreg = 0x91, + DW_OP_bregx = 0x92, + DW_OP_piece = 0x93, + DW_OP_deref_size = 0x94, + DW_OP_xderef_size = 0x95, + DW_OP_nop = 0x96, + DW_OP_push_object_address = 0x97, + DW_OP_call2 = 0x98, + DW_OP_call4 = 0x99, 
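  // Editorial note (not in the original header): only the first, second and
  // last members of the literal/register/base-register families are spelled
  // out above because each family occupies a contiguous range, so the
  // remaining opcodes are obtained by offsetting from the family base, e.g.
  //
  //   unsigned Reg5Op  = dwarf::DW_OP_reg0  + 5;   // 0x55, register 5
  //   unsigned BReg7Op = dwarf::DW_OP_breg0 + 7;   // 0x77, base register 7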
+ DW_OP_call_ref = 0x9a, + DW_OP_form_tls_address = 0x9b, + DW_OP_call_frame_cfa = 0x9c, + DW_OP_lo_user = 0xe0, + DW_OP_hi_user = 0xff, + + // Encoding attribute values + DW_ATE_address = 0x01, + DW_ATE_boolean = 0x02, + DW_ATE_complex_float = 0x03, + DW_ATE_float = 0x04, + DW_ATE_signed = 0x05, + DW_ATE_signed_char = 0x06, + DW_ATE_unsigned = 0x07, + DW_ATE_unsigned_char = 0x08, + DW_ATE_imaginary_float = 0x09, + DW_ATE_packed_decimal = 0x0a, + DW_ATE_numeric_string = 0x0b, + DW_ATE_edited = 0x0c, + DW_ATE_signed_fixed = 0x0d, + DW_ATE_unsigned_fixed = 0x0e, + DW_ATE_decimal_float = 0x0f, + DW_ATE_lo_user = 0x80, + DW_ATE_hi_user = 0xff, + + // Decimal sign attribute values + DW_DS_unsigned = 0x01, + DW_DS_leading_overpunch = 0x02, + DW_DS_trailing_overpunch = 0x03, + DW_DS_leading_separate = 0x04, + DW_DS_trailing_separate = 0x05, + + // Endianity attribute values + DW_END_default = 0x00, + DW_END_big = 0x01, + DW_END_little = 0x02, + DW_END_lo_user = 0x40, + DW_END_hi_user = 0xff, + + // Accessibility codes + DW_ACCESS_public = 0x01, + DW_ACCESS_protected = 0x02, + DW_ACCESS_private = 0x03, + + // Visibility codes + DW_VIS_local = 0x01, + DW_VIS_exported = 0x02, + DW_VIS_qualified = 0x03, + + // Virtuality codes + DW_VIRTUALITY_none = 0x00, + DW_VIRTUALITY_virtual = 0x01, + DW_VIRTUALITY_pure_virtual = 0x02, + + // Language names + DW_LANG_C89 = 0x0001, + DW_LANG_C = 0x0002, + DW_LANG_Ada83 = 0x0003, + DW_LANG_C_plus_plus = 0x0004, + DW_LANG_Cobol74 = 0x0005, + DW_LANG_Cobol85 = 0x0006, + DW_LANG_Fortran77 = 0x0007, + DW_LANG_Fortran90 = 0x0008, + DW_LANG_Pascal83 = 0x0009, + DW_LANG_Modula2 = 0x000a, + DW_LANG_Java = 0x000b, + DW_LANG_C99 = 0x000c, + DW_LANG_Ada95 = 0x000d, + DW_LANG_Fortran95 = 0x000e, + DW_LANG_PLI = 0x000f, + DW_LANG_ObjC = 0x0010, + DW_LANG_ObjC_plus_plus = 0x0011, + DW_LANG_UPC = 0x0012, + DW_LANG_D = 0x0013, + DW_LANG_lo_user = 0x8000, + DW_LANG_hi_user = 0xffff, + + // Identifier case codes + DW_ID_case_sensitive = 0x00, + DW_ID_up_case = 0x01, + DW_ID_down_case = 0x02, + DW_ID_case_insensitive = 0x03, + + // Calling convention codes + DW_CC_normal = 0x01, + DW_CC_program = 0x02, + DW_CC_nocall = 0x03, + DW_CC_lo_user = 0x40, + DW_CC_hi_user = 0xff, + + // Inline codes + DW_INL_not_inlined = 0x00, + DW_INL_inlined = 0x01, + DW_INL_declared_not_inlined = 0x02, + DW_INL_declared_inlined = 0x03, + + // Array ordering + DW_ORD_row_major = 0x00, + DW_ORD_col_major = 0x01, + + // Discriminant descriptor values + DW_DSC_label = 0x00, + DW_DSC_range = 0x01, + + // Line Number Standard Opcode Encodings + DW_LNS_copy = 0x01, + DW_LNS_advance_pc = 0x02, + DW_LNS_advance_line = 0x03, + DW_LNS_set_file = 0x04, + DW_LNS_set_column = 0x05, + DW_LNS_negate_stmt = 0x06, + DW_LNS_set_basic_block = 0x07, + DW_LNS_const_add_pc = 0x08, + DW_LNS_fixed_advance_pc = 0x09, + DW_LNS_set_prologue_end = 0x0a, + DW_LNS_set_epilogue_begin = 0x0b, + DW_LNS_set_isa = 0x0c, + + // Line Number Extended Opcode Encodings + DW_LNE_end_sequence = 0x01, + DW_LNE_set_address = 0x02, + DW_LNE_define_file = 0x03, + DW_LNE_lo_user = 0x80, + DW_LNE_hi_user = 0xff, + + // Macinfo Type Encodings + DW_MACINFO_define = 0x01, + DW_MACINFO_undef = 0x02, + DW_MACINFO_start_file = 0x03, + DW_MACINFO_end_file = 0x04, + DW_MACINFO_vendor_ext = 0xff, + + // Call frame instruction encodings + DW_CFA_extended = 0x00, + DW_CFA_advance_loc = 0x40, + DW_CFA_offset = 0x80, + DW_CFA_restore = 0xc0, + DW_CFA_set_loc = 0x01, + DW_CFA_advance_loc1 = 0x02, + DW_CFA_advance_loc2 = 0x03, + DW_CFA_advance_loc4 = 0x04, + 
DW_CFA_offset_extended = 0x05, + DW_CFA_restore_extended = 0x06, + DW_CFA_undefined = 0x07, + DW_CFA_same_value = 0x08, + DW_CFA_register = 0x09, + DW_CFA_remember_state = 0x0a, + DW_CFA_restore_state = 0x0b, + DW_CFA_def_cfa = 0x0c, + DW_CFA_def_cfa_register = 0x0d, + DW_CFA_def_cfa_offset = 0x0e, + DW_CFA_def_cfa_expression = 0x0f, + DW_CFA_expression = 0x10, + DW_CFA_offset_extended_sf = 0x11, + DW_CFA_def_cfa_sf = 0x12, + DW_CFA_def_cfa_offset_sf = 0x13, + DW_CFA_val_offset = 0x14, + DW_CFA_val_offset_sf = 0x15, + DW_CFA_val_expression = 0x16, + DW_CFA_lo_user = 0x1c, + DW_CFA_hi_user = 0x3f, + + DW_EH_PE_absptr = 0x00, + DW_EH_PE_omit = 0xff, + DW_EH_PE_uleb128 = 0x01, + DW_EH_PE_udata2 = 0x02, + DW_EH_PE_udata4 = 0x03, + DW_EH_PE_udata8 = 0x04, + DW_EH_PE_sleb128 = 0x09, + DW_EH_PE_sdata2 = 0x0A, + DW_EH_PE_sdata4 = 0x0B, + DW_EH_PE_sdata8 = 0x0C, + DW_EH_PE_signed = 0x08, + DW_EH_PE_pcrel = 0x10, + DW_EH_PE_textrel = 0x20, + DW_EH_PE_datarel = 0x30, + DW_EH_PE_funcrel = 0x40, + DW_EH_PE_aligned = 0x50, + DW_EH_PE_indirect = 0x80 +}; + +/// TagString - Return the string for the specified tag. +/// +const char *TagString(unsigned Tag); + +/// ChildrenString - Return the string for the specified children flag. +/// +const char *ChildrenString(unsigned Children); + +/// AttributeString - Return the string for the specified attribute. +/// +const char *AttributeString(unsigned Attribute); + +/// FormEncodingString - Return the string for the specified form encoding. +/// +const char *FormEncodingString(unsigned Encoding); + +/// OperationEncodingString - Return the string for the specified operation +/// encoding. +const char *OperationEncodingString(unsigned Encoding); + +/// AttributeEncodingString - Return the string for the specified attribute +/// encoding. +const char *AttributeEncodingString(unsigned Encoding); + +/// DecimalSignString - Return the string for the specified decimal sign +/// attribute. +const char *DecimalSignString(unsigned Sign); + +/// EndianityString - Return the string for the specified endianity. +/// +const char *EndianityString(unsigned Endian); + +/// AccessibilityString - Return the string for the specified accessibility. +/// +const char *AccessibilityString(unsigned Access); + +/// VisibilityString - Return the string for the specified visibility. +/// +const char *VisibilityString(unsigned Visibility); + +/// VirtualityString - Return the string for the specified virtuality. +/// +const char *VirtualityString(unsigned Virtuality); + +/// LanguageString - Return the string for the specified language. +/// +const char *LanguageString(unsigned Language); + +/// CaseString - Return the string for the specified identifier case. +/// +const char *CaseString(unsigned Case); + +/// ConventionString - Return the string for the specified calling convention. +/// +const char *ConventionString(unsigned Convention); + +/// InlineCodeString - Return the string for the specified inline code. +/// +const char *InlineCodeString(unsigned Code); + +/// ArrayOrderString - Return the string for the specified array order. +/// +const char *ArrayOrderString(unsigned Order); + +/// DiscriminantString - Return the string for the specified discriminant +/// descriptor. +const char *DiscriminantString(unsigned Discriminant); + +/// LNStandardString - Return the string for the specified line number standard. +/// +const char *LNStandardString(unsigned Standard); + +/// LNExtendedString - Return the string for the specified line number extended +/// opcode encodings. 
+const char *LNExtendedString(unsigned Encoding); + +/// MacinfoString - Return the string for the specified macinfo type encodings. +/// +const char *MacinfoString(unsigned Encoding); + +/// CallFrameString - Return the string for the specified call frame instruction +/// encodings. +const char *CallFrameString(unsigned Encoding); + +} // End of namespace dwarf + +} // End of namespace llvm + +#endif diff --git a/include/llvm/Support/DynamicLinker.h b/include/llvm/Support/DynamicLinker.h new file mode 100644 index 0000000..b60ffa8 --- /dev/null +++ b/include/llvm/Support/DynamicLinker.h @@ -0,0 +1,40 @@ +//===-- llvm/Support/DynamicLinker.h - Portable Dynamic Linker --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Lightweight interface to dynamic library linking and loading, and dynamic +// symbol lookup functionality, in whatever form the operating system +// provides it. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_DYNAMICLINKER_H +#define LLVM_SUPPORT_DYNAMICLINKER_H + +#include <string> + +namespace llvm { + +/// LinkDynamicObject - Load the named file as a dynamic library +/// and link it with the currently running process. Returns false +/// on success, true if there is an error (and sets ErrorMessage +/// if it is not NULL). Analogous to dlopen(). +/// +bool LinkDynamicObject (const char *filename, std::string *ErrorMessage); + +/// GetAddressOfSymbol - Returns the address of the named symbol in +/// the currently running process, as reported by the dynamic linker, +/// or NULL if the symbol does not exist or some other error has +/// occurred. +/// +void *GetAddressOfSymbol (const char *symbolName); +void *GetAddressOfSymbol (const std::string &symbolName); + +} // End llvm namespace + +#endif // SUPPORT_DYNAMICLINKER_H diff --git a/include/llvm/Support/ELF.h b/include/llvm/Support/ELF.h new file mode 100644 index 0000000..aa27946 --- /dev/null +++ b/include/llvm/Support/ELF.h @@ -0,0 +1,309 @@ +//===-- llvm/Support/ELF.h - ELF constants and data structures --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header contains common, non-processor-specific data structures and +// constants for the ELF file format. +// +// The details of the ELF32 bits in this file are largely based on +// the Tool Interface Standard (TIS) Executable and Linking Format +// (ELF) Specification Version 1.2, May 1995. The ELF64 stuff is not +// standardized, as far as I can tell. It was largely based on information +// I found in OpenBSD header files. 
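// As a purely illustrative sketch (Buffer and Size are assumed to come from
// the caller), the 32-bit file header declared below can be sanity-checked
// with its own helpers:
//
//   const ELF::Elf32_Ehdr *Hdr =
//       reinterpret_cast<const ELF::Elf32_Ehdr *>(Buffer);
//   if (Size >= sizeof(ELF::Elf32_Ehdr) && Hdr->checkMagic() &&
//       Hdr->getFileClass() == ELF::ELFCLASS32)
//     /* the ELF32 section and program headers can now be examined */;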
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_ELF_H +#define LLVM_SUPPORT_ELF_H + +#include "llvm/Support/DataTypes.h" +#include <cstring> + +namespace llvm { + +namespace ELF { + +typedef uint32_t Elf32_Addr; // Program address +typedef uint16_t Elf32_Half; +typedef uint32_t Elf32_Off; // File offset +typedef int32_t Elf32_Sword; +typedef uint32_t Elf32_Word; + +typedef uint64_t Elf64_Addr; +typedef uint64_t Elf64_Off; +typedef int32_t Elf64_Shalf; +typedef int32_t Elf64_Sword; +typedef uint32_t Elf64_Word; +typedef int64_t Elf64_Sxword; +typedef uint64_t Elf64_Xword; +typedef uint32_t Elf64_Half; +typedef uint16_t Elf64_Quarter; + +// Object file magic string. +static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' }; + +struct Elf32_Ehdr { + unsigned char e_ident[16]; // ELF Identification bytes + Elf32_Half e_type; // Type of file (see ET_* below) + Elf32_Half e_machine; // Required architecture for this file (see EM_*) + Elf32_Word e_version; // Must be equal to 1 + Elf32_Addr e_entry; // Address to jump to in order to start program + Elf32_Off e_phoff; // Program header table's file offset, in bytes + Elf32_Off e_shoff; // Section header table's file offset, in bytes + Elf32_Word e_flags; // Processor-specific flags + Elf32_Half e_ehsize; // Size of ELF header, in bytes + Elf32_Half e_phentsize; // Size of an entry in the program header table + Elf32_Half e_phnum; // Number of entries in the program header table + Elf32_Half e_shentsize; // Size of an entry in the section header table + Elf32_Half e_shnum; // Number of entries in the section header table + Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table + bool checkMagic () const { + return (memcmp (e_ident, ElfMagic, strlen (ElfMagic))) == 0; + } + unsigned char getFileClass () const { return e_ident[4]; } + unsigned char getDataEncoding () { return e_ident[5]; } +}; + +// 64-bit ELF header. Fields are the same as for ELF32, but with different +// types (see above). +struct Elf64_Ehdr { + unsigned char e_ident[16]; + Elf64_Quarter e_type; + Elf64_Quarter e_machine; + Elf64_Half e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Half e_flags; + Elf64_Quarter e_ehsize; + Elf64_Quarter e_phentsize; + Elf64_Quarter e_phnum; + Elf64_Quarter e_shentsize; + Elf64_Quarter e_shnum; + Elf64_Quarter e_shstrndx; +}; + +// File types +enum { + ET_NONE = 0, // No file type + ET_REL = 1, // Relocatable file + ET_EXEC = 2, // Executable file + ET_DYN = 3, // Shared object file + ET_CORE = 4, // Core file + ET_LOPROC = 0xff00, // Beginning of processor-specific codes + ET_HIPROC = 0xffff // Processor-specific +}; + +// Machine architectures +enum { + EM_NONE = 0, // No machine + EM_M32 = 1, // AT&T WE 32100 + EM_SPARC = 2, // SPARC + EM_386 = 3, // Intel 386 + EM_68K = 4, // Motorola 68000 + EM_88K = 5, // Motorola 88000 + EM_486 = 6, // Intel 486 (deprecated) + EM_860 = 7, // Intel 80860 + EM_MIPS = 8, // MIPS R3000 + EM_PPC = 20, // PowerPC + EM_ARM = 40, // ARM + EM_ALPHA = 41, // DEC Alpha + EM_SPARCV9 = 43, // SPARC V9 + EM_X86_64 = 62 // AMD64 +}; + +// Object file classes. +enum { + ELFCLASS32 = 1, // 32-bit object file + ELFCLASS64 = 2 // 64-bit object file +}; + +// Object file byte orderings. +enum { + ELFDATA2LSB = 1, // Little-endian object file + ELFDATA2MSB = 2 // Big-endian object file +}; + +// Section header. 
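// (Illustrative aside, not part of the original header: given a validated
// Elf32_Ehdr *Hdr at the start of a const char *Base buffer, the table of
// these section headers is located through the e_shoff/e_shnum fields, e.g.
//
//   const Elf32_Shdr *SHT =
//       reinterpret_cast<const Elf32_Shdr *>(Base + Hdr->e_shoff);
//   for (unsigned i = 0; i != Hdr->e_shnum; ++i)
//     /* inspect SHT[i].sh_type, SHT[i].sh_size, ... */;
//
// assuming e_shentsize == sizeof(Elf32_Shdr).)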
+struct Elf32_Shdr { + Elf32_Word sh_name; // Section name (index into string table) + Elf32_Word sh_type; // Section type (SHT_*) + Elf32_Word sh_flags; // Section flags (SHF_*) + Elf32_Addr sh_addr; // Address where section is to be loaded + Elf32_Off sh_offset; // File offset of section data, in bytes + Elf32_Word sh_size; // Size of section, in bytes + Elf32_Word sh_link; // Section type-specific header table index link + Elf32_Word sh_info; // Section type-specific extra information + Elf32_Word sh_addralign; // Section address alignment + Elf32_Word sh_entsize; // Size of records contained within the section +}; + +// Section header for ELF64 - same fields as ELF32, different types. +struct Elf64_Shdr { + Elf64_Half sh_name; + Elf64_Half sh_type; + Elf64_Xword sh_flags; + Elf64_Addr sh_addr; + Elf64_Off sh_offset; + Elf64_Xword sh_size; + Elf64_Half sh_link; + Elf64_Half sh_info; + Elf64_Xword sh_addralign; + Elf64_Xword sh_entsize; +}; + +// Special section indices. +enum { + SHN_UNDEF = 0, // Undefined, missing, irrelevant, or meaningless + SHN_LORESERVE = 0xff00, // Lowest reserved index + SHN_LOPROC = 0xff00, // Lowest processor-specific index + SHN_HIPROC = 0xff1f, // Highest processor-specific index + SHN_ABS = 0xfff1, // Symbol has absolute value; does not need relocation + SHN_COMMON = 0xfff2, // FORTRAN COMMON or C external global variables + SHN_HIRESERVE = 0xffff // Highest reserved index +}; + +// Section types. +enum { + SHT_NULL = 0, // No associated section (inactive entry). + SHT_PROGBITS = 1, // Program-defined contents. + SHT_SYMTAB = 2, // Symbol table. + SHT_STRTAB = 3, // String table. + SHT_RELA = 4, // Relocation entries; explicit addends. + SHT_HASH = 5, // Symbol hash table. + SHT_DYNAMIC = 6, // Information for dynamic linking. + SHT_NOTE = 7, // Information about the file. + SHT_NOBITS = 8, // Data occupies no space in the file. + SHT_REL = 9, // Relocation entries; no explicit addends. + SHT_SHLIB = 10, // Reserved. + SHT_DYNSYM = 11, // Symbol table. + SHT_LOPROC = 0x70000000, // Lowest processor architecture-specific type. + SHT_HIPROC = 0x7fffffff, // Highest processor architecture-specific type. + SHT_LOUSER = 0x80000000, // Lowest type reserved for applications. + SHT_HIUSER = 0xffffffff // Highest type reserved for applications. +}; + +// Section flags. +enum { + SHF_WRITE = 0x1, // Section data should be writable during execution. + SHF_ALLOC = 0x2, // Section occupies memory during program execution. + SHF_EXECINSTR = 0x4, // Section contains executable machine instructions. + SHF_MASKPROC = 0xf0000000 // Bits indicating processor-specific flags. +}; + +// Symbol table entries. 
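// (Editorial example: the st_info accessors declared below pack the binding
// into the high four bits and the type into the low four, mirroring the
// ELF32_ST_INFO macro, so for instance
//
//   Elf32_Sym Sym = {};                        // hypothetical, zero-filled
//   Sym.setBindingAndType(STB_GLOBAL, STT_FUNC);
//   // now Sym.getBinding() == STB_GLOBAL, Sym.getType() == STT_FUNC,
//   // i.e. st_info == 0x12.)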
+struct Elf32_Sym { + Elf32_Word st_name; // Symbol name (index into string table) + Elf32_Addr st_value; // Value or address associated with the symbol + Elf32_Word st_size; // Size of the symbol + unsigned char st_info; // Symbol's type and binding attributes + unsigned char st_other; // Must be zero; reserved + Elf32_Half st_shndx; // Which section (header table index) it's defined in + + // These accessors and mutators correspond to the ELF32_ST_BIND, + // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification: + unsigned char getBinding () const { return st_info >> 4; } + unsigned char getType () const { return st_info & 0x0f; } + void setBinding (unsigned char b) { setBindingAndType (b, getType ()); } + void setType (unsigned char t) { setBindingAndType (getBinding (), t); } + void setBindingAndType (unsigned char b, unsigned char t) { + st_info = (b << 4) + (t & 0x0f); + } +}; + +// Symbol bindings. +enum { + STB_LOCAL = 0, // Local symbol, not visible outside obj file containing def + STB_GLOBAL = 1, // Global symbol, visible to all object files being combined + STB_WEAK = 2, // Weak symbol, like global but lower-precedence + STB_LOPROC = 13, // Lowest processor-specific binding type + STB_HIPROC = 15 // Highest processor-specific binding type +}; + +// Symbol types. +enum { + STT_NOTYPE = 0, // Symbol's type is not specified + STT_OBJECT = 1, // Symbol is a data object (variable, array, etc.) + STT_FUNC = 2, // Symbol is executable code (function, etc.) + STT_SECTION = 3, // Symbol refers to a section + STT_FILE = 4, // Local, absolute symbol that refers to a file + STT_LOPROC = 13, // Lowest processor-specific symbol type + STT_HIPROC = 15 // Highest processor-specific symbol type +}; + +// Relocation entry, without explicit addend. +struct Elf32_Rel { + Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr) + Elf32_Word r_info; // Symbol table index and type of relocation to apply + + // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE, + // and ELF32_R_INFO macros defined in the ELF specification: + Elf32_Word getSymbol () const { return (r_info >> 8); } + unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); } + void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); } + void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); } + void setSymbolAndType (Elf32_Word s, unsigned char t) { + r_info = (s << 8) + t; + }; +}; + +// Relocation entry with explicit addend. +struct Elf32_Rela { + Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr) + Elf32_Word r_info; // Symbol table index and type of relocation to apply + Elf32_Sword r_addend; // Compute value for relocatable field by adding this + + // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE, + // and ELF32_R_INFO macros defined in the ELF specification: + Elf32_Word getSymbol () const { return (r_info >> 8); } + unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); } + void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); } + void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); } + void setSymbolAndType (Elf32_Word s, unsigned char t) { + r_info = (s << 8) + t; + }; +}; + +// Program header. 
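// (Editorial example for the two relocation records above: r_info packs the
// symbol-table index into the upper 24 bits and the relocation type into the
// low 8, exactly as the ELF32_R_INFO macro does. With hypothetical values:
//
//   Elf32_Rel Rel = {};
//   Rel.setSymbolAndType(7, 2);
//   // Rel.getSymbol() == 7, Rel.getType() == 2, r_info == 0x702.)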
+struct Elf32_Phdr { + Elf32_Word p_type; // Type of segment + Elf32_Off p_offset; // File offset where segment is located, in bytes + Elf32_Addr p_vaddr; // Virtual address of beginning of segment + Elf32_Addr p_paddr; // Physical address of beginning of segment (OS-specific) + Elf32_Word p_filesz; // Num. of bytes in file image of segment (may be zero) + Elf32_Word p_memsz; // Num. of bytes in mem image of segment (may be zero) + Elf32_Word p_flags; // Segment flags + Elf32_Word p_align; // Segment alignment constraint +}; + +// Segment types. +enum { + PT_NULL = 0, // Unused segment. + PT_LOAD = 1, // Loadable segment. + PT_DYNAMIC = 2, // Dynamic linking information. + PT_INTERP = 3, // Interpreter pathname. + PT_NOTE = 4, // Auxiliary information. + PT_SHLIB = 5, // Reserved. + PT_PHDR = 6, // The program header table itself. + PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type. + PT_HIPROC = 0x7fffffff // Highest processor-specific program hdr entry type. +}; + +// Segment flag bits. +enum { + PF_X = 1, // Execute + PF_W = 2, // Write + PF_R = 4, // Read + PF_MASKPROC = 0xf0000000 // Unspecified +}; + +} // end namespace ELF + +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/FileUtilities.h b/include/llvm/Support/FileUtilities.h new file mode 100644 index 0000000..cc8f953 --- /dev/null +++ b/include/llvm/Support/FileUtilities.h @@ -0,0 +1,59 @@ +//===- llvm/Support/FileUtilities.h - File System Utilities -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a family of utility functions which are useful for doing +// various things with files. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_FILEUTILITIES_H +#define LLVM_SUPPORT_FILEUTILITIES_H + +#include "llvm/System/Path.h" + +namespace llvm { + + /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if + /// the files match, 1 if they are different, and 2 if there is a file error. + /// This function allows you to specify an absolete and relative FP error that + /// is allowed to exist. If you specify a string to fill in for the error + /// option, it will set the string to an error message if an error occurs, or + /// if the files are different. + /// + int DiffFilesWithTolerance(const sys::PathWithStatus &FileA, + const sys::PathWithStatus &FileB, + double AbsTol, double RelTol, + std::string *Error = 0); + + + /// FileRemover - This class is a simple object meant to be stack allocated. + /// If an exception is thrown from a region, the object removes the filename + /// specified (if deleteIt is true). + /// + class FileRemover { + sys::Path Filename; + bool DeleteIt; + public: + explicit FileRemover(const sys::Path &filename, bool deleteIt = true) + : Filename(filename), DeleteIt(deleteIt) {} + + ~FileRemover() { + if (DeleteIt) { + // Ignore problems deleting the file. + Filename.eraseFromDisk(); + } + } + + /// releaseFile - Take ownership of the file away from the FileRemover so it + /// will not be removed when the object is destroyed. 
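    /// A brief illustrative sketch (the path and the processing step are
    /// hypothetical):
    ///
    ///   sys::Path Tmp("example.tmp.bc");
    ///   FileRemover Cleanup(Tmp);        // removed on every exit path...
    ///   if (writeSucceeded(Tmp))         // hypothetical predicate
    ///     Cleanup.releaseFile();         // ...unless we decide to keep it
    ///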
+ void releaseFile() { DeleteIt = false; } + }; +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/Format.h b/include/llvm/Support/Format.h new file mode 100644 index 0000000..2ab097f --- /dev/null +++ b/include/llvm/Support/Format.h @@ -0,0 +1,155 @@ +//===- Format.h - Efficient printf-style formatting for streams -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the format() function, which can be used with other +// LLVM subsystems to provide printf-style formatting. This gives all the power +// and risk of printf. This can be used like this (with raw_ostreams as an +// example): +// +// OS << "mynumber: " << format("%4.5f", 1234.412) << '\n'; +// +// Or if you prefer: +// +// OS << format("mynumber: %4.5f\n", 1234.412); +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_FORMAT_H +#define LLVM_SUPPORT_FORMAT_H + +#include <cstdio> +#ifdef WIN32 +#define snprintf _snprintf +#endif + +namespace llvm { + +/// format_object_base - This is a helper class used for handling formatted +/// output. It is the abstract base class of a templated derived class. +class format_object_base { +protected: + const char *Fmt; + virtual void home(); // Out of line virtual method. +public: + format_object_base(const char *fmt) : Fmt(fmt) {} + virtual ~format_object_base() {} + + /// print - Format the object into the specified buffer. On success, this + /// returns the length of the formatted string. If the buffer is too small, + /// this returns a length to retry with, which will be larger than BufferSize. + virtual unsigned print(char *Buffer, unsigned BufferSize) const = 0; +}; + +/// format_object1 - This is a templated helper class used by the format +/// function that captures the object to be formated and the format string. When +/// actually printed, this synthesizes the string into a temporary buffer +/// provided and returns whether or not it is big enough. +template <typename T> +class format_object1 : public format_object_base { + T Val; +public: + format_object1(const char *fmt, const T &val) + : format_object_base(fmt), Val(val) { + } + + /// print - Format the object into the specified buffer. On success, this + /// returns the length of the formatted string. If the buffer is too small, + /// this returns a length to retry with, which will be larger than BufferSize. + virtual unsigned print(char *Buffer, unsigned BufferSize) const { + int N = snprintf(Buffer, BufferSize-1, Fmt, Val); + if (N < 0) // VC++ and old GlibC return negative on overflow. + return BufferSize*2; + if (unsigned(N) >= BufferSize-1)// Other impls yield number of bytes needed. + return N+1; + // If N is positive and <= BufferSize-1, then the string fit, yay. + return N; + } +}; + +/// format_object2 - This is a templated helper class used by the format +/// function that captures the object to be formated and the format string. When +/// actually printed, this synthesizes the string into a temporary buffer +/// provided and returns whether or not it is big enough. 
+template <typename T1, typename T2> +class format_object2 : public format_object_base { + T1 Val1; + T2 Val2; +public: + format_object2(const char *fmt, const T1 &val1, const T2 &val2) + : format_object_base(fmt), Val1(val1), Val2(val2) { + } + + /// print - Format the object into the specified buffer. On success, this + /// returns the length of the formatted string. If the buffer is too small, + /// this returns a length to retry with, which will be larger than BufferSize. + virtual unsigned print(char *Buffer, unsigned BufferSize) const { + int N = snprintf(Buffer, BufferSize-1, Fmt, Val1, Val2); + if (N < 0) // VC++ and old GlibC return negative on overflow. + return BufferSize*2; + if (unsigned(N) >= BufferSize-1)// Other impls yield number of bytes needed. + return N+1; + // If N is positive and <= BufferSize-1, then the string fit, yay. + return N; + } +}; + +/// format_object3 - This is a templated helper class used by the format +/// function that captures the object to be formated and the format string. When +/// actually printed, this synthesizes the string into a temporary buffer +/// provided and returns whether or not it is big enough. +template <typename T1, typename T2, typename T3> +class format_object3 : public format_object_base { + T1 Val1; + T2 Val2; + T3 Val3; +public: + format_object3(const char *fmt, const T1 &val1, const T2 &val2,const T3 &val3) + : format_object_base(fmt), Val1(val1), Val2(val2), Val3(val3) { + } + + /// print - Format the object into the specified buffer. On success, this + /// returns the length of the formatted string. If the buffer is too small, + /// this returns a length to retry with, which will be larger than BufferSize. + virtual unsigned print(char *Buffer, unsigned BufferSize) const { + int N = snprintf(Buffer, BufferSize-1, Fmt, Val1, Val2, Val3); + if (N < 0) // VC++ and old GlibC return negative on overflow. + return BufferSize*2; + if (unsigned(N) >= BufferSize-1)// Other impls yield number of bytes needed. + return N+1; + // If N is positive and <= BufferSize-1, then the string fit, yay. + return N; + } +}; + +/// format - This is a helper function that is used to produce formatted output. +/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +template <typename T> +inline format_object1<T> format(const char *Fmt, const T &Val) { + return format_object1<T>(Fmt, Val); +} + +/// format - This is a helper function that is used to produce formatted output. +/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +template <typename T1, typename T2> +inline format_object2<T1, T2> format(const char *Fmt, const T1 &Val1, + const T2 &Val2) { + return format_object2<T1, T2>(Fmt, Val1, Val2); +} + +/// format - This is a helper function that is used to produce formatted output. 
+/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +template <typename T1, typename T2, typename T3> + inline format_object3<T1, T2, T3> format(const char *Fmt, const T1 &Val1, + const T2 &Val2, const T3 &Val3) { + return format_object3<T1, T2, T3>(Fmt, Val1, Val2, Val3); +} + +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/GetElementPtrTypeIterator.h b/include/llvm/Support/GetElementPtrTypeIterator.h new file mode 100644 index 0000000..e1cda75 --- /dev/null +++ b/include/llvm/Support/GetElementPtrTypeIterator.h @@ -0,0 +1,112 @@ +//===- llvm/Support/GetElementPtrTypeIterator.h -----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements an iterator for walking through the types indexed by +// getelementptr instructions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_GETELEMENTPTRTYPE_H +#define LLVM_SUPPORT_GETELEMENTPTRTYPE_H + +#include "llvm/User.h" +#include "llvm/DerivedTypes.h" + +namespace llvm { + template<typename ItTy = User::const_op_iterator> + class generic_gep_type_iterator + : public forward_iterator<const Type *, ptrdiff_t> { + typedef forward_iterator<const Type*, ptrdiff_t> super; + + ItTy OpIt; + const Type *CurTy; + generic_gep_type_iterator() {} + public: + + static generic_gep_type_iterator begin(const Type *Ty, ItTy It) { + generic_gep_type_iterator I; + I.CurTy = Ty; + I.OpIt = It; + return I; + } + static generic_gep_type_iterator end(ItTy It) { + generic_gep_type_iterator I; + I.CurTy = 0; + I.OpIt = It; + return I; + } + + bool operator==(const generic_gep_type_iterator& x) const { + return OpIt == x.OpIt; + } + bool operator!=(const generic_gep_type_iterator& x) const { + return !operator==(x); + } + + const Type *operator*() const { + return CurTy; + } + + const Type *getIndexedType() const { + const CompositeType *CT = cast<CompositeType>(CurTy); + return CT->getTypeAtIndex(getOperand()); + } + + // This is a non-standard operator->. It allows you to call methods on the + // current type directly. 
+ const Type *operator->() const { return operator*(); } + + Value *getOperand() const { return *OpIt; } + + generic_gep_type_iterator& operator++() { // Preincrement + if (const CompositeType *CT = dyn_cast<CompositeType>(CurTy)) { + CurTy = CT->getTypeAtIndex(getOperand()); + } else { + CurTy = 0; + } + ++OpIt; + return *this; + } + + generic_gep_type_iterator operator++(int) { // Postincrement + generic_gep_type_iterator tmp = *this; ++*this; return tmp; + } + }; + + typedef generic_gep_type_iterator<> gep_type_iterator; + + inline gep_type_iterator gep_type_begin(const User *GEP) { + return gep_type_iterator::begin(GEP->getOperand(0)->getType(), + GEP->op_begin()+1); + } + inline gep_type_iterator gep_type_end(const User *GEP) { + return gep_type_iterator::end(GEP->op_end()); + } + inline gep_type_iterator gep_type_begin(const User &GEP) { + return gep_type_iterator::begin(GEP.getOperand(0)->getType(), + GEP.op_begin()+1); + } + inline gep_type_iterator gep_type_end(const User &GEP) { + return gep_type_iterator::end(GEP.op_end()); + } + + template<typename ItTy> + inline generic_gep_type_iterator<ItTy> + gep_type_begin(const Type *Op0, ItTy I, ItTy E) { + return generic_gep_type_iterator<ItTy>::begin(Op0, I); + } + + template<typename ItTy> + inline generic_gep_type_iterator<ItTy> + gep_type_end(const Type *Op0, ItTy I, ItTy E) { + return generic_gep_type_iterator<ItTy>::end(E); + } +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/GraphWriter.h b/include/llvm/Support/GraphWriter.h new file mode 100644 index 0000000..ca28aafa --- /dev/null +++ b/include/llvm/Support/GraphWriter.h @@ -0,0 +1,323 @@ +//===-- llvm/Support/GraphWriter.h - Write graph to a .dot file -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a simple interface that can be used to print out generic +// LLVM graphs to ".dot" files. "dot" is a tool that is part of the AT&T +// graphviz package (http://www.research.att.com/sw/tools/graphviz/) which can +// be used to turn the files output by this interface into a variety of +// different graphics formats. +// +// Graphs do not need to implement any interface past what is already required +// by the GraphTraits template, but they can choose to implement specializations +// of the DOTGraphTraits template if they want to customize the graphs output in +// any way. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_GRAPHWRITER_H +#define LLVM_SUPPORT_GRAPHWRITER_H + +#include "llvm/Support/DOTGraphTraits.h" +#include "llvm/Support/Streams.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/System/Path.h" +#include <fstream> +#include <vector> + +namespace llvm { + +namespace DOT { // Private functions... + inline std::string EscapeString(const std::string &Label) { + std::string Str(Label); + for (unsigned i = 0; i != Str.length(); ++i) + switch (Str[i]) { + case '\n': + Str.insert(Str.begin()+i, '\\'); // Escape character... 
+ ++i; + Str[i] = 'n'; + break; + case '\t': + Str.insert(Str.begin()+i, ' '); // Convert to two spaces + ++i; + Str[i] = ' '; + break; + case '\\': + if (i+1 != Str.length()) + switch (Str[i+1]) { + case 'l': continue; // don't disturb \l + case '|': case '{': case '}': + Str.erase(Str.begin()+i); continue; + default: break; + } + case '{': case '}': + case '<': case '>': + case '|': case '"': + Str.insert(Str.begin()+i, '\\'); // Escape character... + ++i; // don't infinite loop + break; + } + return Str; + } +} + +void DisplayGraph(const sys::Path& Filename); + +template<typename GraphType> +class GraphWriter { + std::ostream &O; + const GraphType &G; + + typedef DOTGraphTraits<GraphType> DOTTraits; + typedef GraphTraits<GraphType> GTraits; + typedef typename GTraits::NodeType NodeType; + typedef typename GTraits::nodes_iterator node_iterator; + typedef typename GTraits::ChildIteratorType child_iterator; +public: + GraphWriter(std::ostream &o, const GraphType &g) : O(o), G(g) {} + + void writeHeader(const std::string &Name) { + std::string GraphName = DOTTraits::getGraphName(G); + + if (!Name.empty()) + O << "digraph \"" << DOT::EscapeString(Name) << "\" {\n"; + else if (!GraphName.empty()) + O << "digraph \"" << DOT::EscapeString(GraphName) << "\" {\n"; + else + O << "digraph unnamed {\n"; + + if (DOTTraits::renderGraphFromBottomUp()) + O << "\trankdir=\"BT\";\n"; + + if (!Name.empty()) + O << "\tlabel=\"" << DOT::EscapeString(Name) << "\";\n"; + else if (!GraphName.empty()) + O << "\tlabel=\"" << DOT::EscapeString(GraphName) << "\";\n"; + O << DOTTraits::getGraphProperties(G); + O << "\n"; + } + + void writeFooter() { + // Finish off the graph + O << "}\n"; + } + + void writeNodes() { + // Loop over the graph, printing it out... + for (node_iterator I = GTraits::nodes_begin(G), E = GTraits::nodes_end(G); + I != E; ++I) + writeNode(*I); + } + + void writeNode(NodeType& Node) { + writeNode(&Node); + } + + void writeNode(NodeType *const *Node) { + writeNode(*Node); + } + + void writeNode(NodeType *Node) { + std::string NodeAttributes = DOTTraits::getNodeAttributes(Node, G); + + O << "\tNode" << static_cast<const void*>(Node) << " [shape=record,"; + if (!NodeAttributes.empty()) O << NodeAttributes << ","; + O << "label=\"{"; + + if (!DOTTraits::renderGraphFromBottomUp()) { + O << DOT::EscapeString(DOTTraits::getNodeLabel(Node, G)); + + // If we should include the address of the node in the label, do so now. + if (DOTTraits::hasNodeAddressLabel(Node, G)) + O << "|" << (void*)Node; + } + + // Print out the fields of the current node... + child_iterator EI = GTraits::child_begin(Node); + child_iterator EE = GTraits::child_end(Node); + if (EI != EE) { + if (!DOTTraits::renderGraphFromBottomUp()) O << "|"; + O << "{"; + + for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i) { + if (i) O << "|"; + O << "<s" << i << ">" << DOTTraits::getEdgeSourceLabel(Node, EI); + } + + if (EI != EE) + O << "|<s64>truncated..."; + O << "}"; + if (DOTTraits::renderGraphFromBottomUp()) O << "|"; + } + + if (DOTTraits::renderGraphFromBottomUp()) { + O << DOT::EscapeString(DOTTraits::getNodeLabel(Node, G)); + + // If we should include the address of the node in the label, do so now. 
+ if (DOTTraits::hasNodeAddressLabel(Node, G)) + O << "|" << (void*)Node; + } + + if (DOTTraits::hasEdgeDestLabels()) { + O << "|{"; + + unsigned i = 0, e = DOTTraits::numEdgeDestLabels(Node); + for (; i != e && i != 64; ++i) { + if (i) O << "|"; + O << "<d" << i << ">" << DOTTraits::getEdgeDestLabel(Node, i); + } + + if (i != e) + O << "|<d64>truncated..."; + O << "}"; + } + + O << "}\"];\n"; // Finish printing the "node" line + + // Output all of the edges now + EI = GTraits::child_begin(Node); + for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i) + writeEdge(Node, i, EI); + for (; EI != EE; ++EI) + writeEdge(Node, 64, EI); + } + + void writeEdge(NodeType *Node, unsigned edgeidx, child_iterator EI) { + if (NodeType *TargetNode = *EI) { + int DestPort = -1; + if (DOTTraits::edgeTargetsEdgeSource(Node, EI)) { + child_iterator TargetIt = DOTTraits::getEdgeTarget(Node, EI); + + // Figure out which edge this targets... + unsigned Offset = + (unsigned)std::distance(GTraits::child_begin(TargetNode), TargetIt); + DestPort = static_cast<int>(Offset); + } + + emitEdge(static_cast<const void*>(Node), edgeidx, + static_cast<const void*>(TargetNode), DestPort, + DOTTraits::getEdgeAttributes(Node, EI)); + } + } + + /// emitSimpleNode - Outputs a simple (non-record) node + void emitSimpleNode(const void *ID, const std::string &Attr, + const std::string &Label, unsigned NumEdgeSources = 0, + const std::vector<std::string> *EdgeSourceLabels = 0) { + O << "\tNode" << ID << "[ "; + if (!Attr.empty()) + O << Attr << ","; + O << " label =\""; + if (NumEdgeSources) O << "{"; + O << DOT::EscapeString(Label); + if (NumEdgeSources) { + O << "|{"; + + for (unsigned i = 0; i != NumEdgeSources; ++i) { + if (i) O << "|"; + O << "<g" << i << ">"; + if (EdgeSourceLabels) O << (*EdgeSourceLabels)[i]; + } + O << "}}"; + } + O << "\"];\n"; + } + + /// emitEdge - Output an edge from a simple node into the graph... + void emitEdge(const void *SrcNodeID, int SrcNodePort, + const void *DestNodeID, int DestNodePort, + const std::string &Attrs) { + if (SrcNodePort > 64) return; // Eminating from truncated part? + if (DestNodePort > 64) DestNodePort = 64; // Targetting the truncated part? + + O << "\tNode" << SrcNodeID; + if (SrcNodePort >= 0) + O << ":s" << SrcNodePort; + O << " -> Node" << DestNodeID; + if (DestNodePort >= 0) + O << ":d" << DestNodePort; + + if (!Attrs.empty()) + O << "[" << Attrs << "]"; + O << ";\n"; + } +}; + +template<typename GraphType> +std::ostream &WriteGraph(std::ostream &O, const GraphType &G, + const std::string &Name = "", + const std::string &Title = "") { + // Start the graph emission process... + GraphWriter<GraphType> W(O, G); + + // Output the header for the graph... + W.writeHeader(Title); + + // Emit all of the nodes in the graph... + W.writeNodes(); + + // Output any customizations on the graph + DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, W); + + // Output the end of the graph + W.writeFooter(); + return O; +} + +template<typename GraphType> +sys::Path WriteGraph(const GraphType &G, + const std::string& Name, + const std::string& Title = "") { + std::string ErrMsg; + sys::Path Filename = sys::Path::GetTemporaryDirectory(&ErrMsg); + if (Filename.isEmpty()) { + cerr << "Error: " << ErrMsg << "\n"; + return Filename; + } + Filename.appendComponent(Name + ".dot"); + if (Filename.makeUnique(true,&ErrMsg)) { + cerr << "Error: " << ErrMsg << "\n"; + return sys::Path(); + } + + cerr << "Writing '" << Filename << "'... 
"; + + std::ofstream O(Filename.c_str()); + + if (O.good()) { + WriteGraph(O, G, Name, Title); + cerr << " done. \n"; + + O.close(); + } else { + cerr << "error opening file for writing!\n"; + Filename.clear(); + } + + return Filename; +} + +/// ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, +/// then cleanup. For use from the debugger. +/// +template<typename GraphType> +void ViewGraph(const GraphType& G, + const std::string& Name, + const std::string& Title = "") { + sys::Path Filename = WriteGraph(G, Name, Title); + + if (Filename.isEmpty()) { + return; + } + + DisplayGraph(Filename); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/IRBuilder.h b/include/llvm/Support/IRBuilder.h new file mode 100644 index 0000000..9ef14af --- /dev/null +++ b/include/llvm/Support/IRBuilder.h @@ -0,0 +1,704 @@ +//===---- llvm/Support/IRBuilder.h - Builder for LLVM Instrs ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the IRBuilder class, which is used as a convenient way +// to create LLVM instructions with a consistent and simplified interface. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_IRBUILDER_H +#define LLVM_SUPPORT_IRBUILDER_H + +#include "llvm/Constants.h" +#include "llvm/Instructions.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Function.h" +#include "llvm/Support/ConstantFolder.h" + +namespace llvm { + +/// IRBuilder - This provides a uniform API for creating instructions and +/// inserting them into a basic block: either at the end of a BasicBlock, or +/// at a specific iterator location in a block. +/// +/// Note that the builder does not expose the full generality of LLVM +/// instructions. For example, it cannot be used to create instructions with +/// arbitrary names (specifically, names with nul characters in them) - It only +/// supports nul-terminated C strings. For fully generic names, use +/// I->setName(). For access to extra instruction properties, use the mutators +/// (e.g. setVolatile) on the instructions after they have been created. +/// The first template argument handles whether or not to preserve names in the +/// final instruction output. This defaults to on. The second template argument +/// specifies a class to use for creating constants. This defaults to creating +/// minimally folded constants. +template <bool preserveNames=true, typename T = ConstantFolder> class IRBuilder{ + BasicBlock *BB; + BasicBlock::iterator InsertPt; + T Folder; +public: + IRBuilder(const T& F = T()) : Folder(F) { ClearInsertionPoint(); } + explicit IRBuilder(BasicBlock *TheBB, const T& F = T()) + : Folder(F) { SetInsertPoint(TheBB); } + IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F = T()) + : Folder(F) { SetInsertPoint(TheBB, IP); } + + /// getFolder - Get the constant folder being used. + const T& getFolder() { return Folder; } + + /// isNamePreserving - Return true if this builder is configured to actually + /// add the requested names to IR created through it. 
+ bool isNamePreserving() const { return preserveNames; } + + //===--------------------------------------------------------------------===// + // Builder configuration methods + //===--------------------------------------------------------------------===// + + /// ClearInsertionPoint - Clear the insertion point: created instructions will + /// not be inserted into a block. + void ClearInsertionPoint() { + BB = 0; + } + + BasicBlock *GetInsertBlock() const { return BB; } + + BasicBlock::iterator GetInsertPoint() const { return InsertPt; } + + /// SetInsertPoint - This specifies that created instructions should be + /// appended to the end of the specified block. + void SetInsertPoint(BasicBlock *TheBB) { + BB = TheBB; + InsertPt = BB->end(); + } + + /// SetInsertPoint - This specifies that created instructions should be + /// inserted at the specified point. + void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) { + BB = TheBB; + InsertPt = IP; + } + + /// Insert - Insert and return the specified instruction. + template<typename InstTy> + InstTy *Insert(InstTy *I, const char *Name = "") const { + InsertHelper(I, Name); + return I; + } + + /// InsertHelper - Insert the specified instruction at the specified insertion + /// point. This is split out of Insert so that it isn't duplicated for every + /// template instantiation. + void InsertHelper(Instruction *I, const char *Name) const { + if (BB) BB->getInstList().insert(InsertPt, I); + if (preserveNames && Name[0]) + I->setName(Name); + } + + //===--------------------------------------------------------------------===// + // Instruction creation methods: Terminators + //===--------------------------------------------------------------------===// + + /// CreateRetVoid - Create a 'ret void' instruction. + ReturnInst *CreateRetVoid() { + return Insert(ReturnInst::Create()); + } + + /// @verbatim + /// CreateRet - Create a 'ret <val>' instruction. + /// @endverbatim + ReturnInst *CreateRet(Value *V) { + return Insert(ReturnInst::Create(V)); + } + + /// CreateAggregateRet - Create a sequence of N insertvalue instructions, + /// with one Value from the retVals array each, that build a aggregate + /// return value one value at a time, and a ret instruction to return + /// the resulting aggregate value. This is a convenience function for + /// code that uses aggregate return values as a vehicle for having + /// multiple return values. + /// + ReturnInst *CreateAggregateRet(Value * const* retVals, unsigned N) { + const Type *RetType = BB->getParent()->getReturnType(); + Value *V = UndefValue::get(RetType); + for (unsigned i = 0; i != N; ++i) + V = CreateInsertValue(V, retVals[i], i, "mrv"); + return Insert(ReturnInst::Create(V)); + } + + /// CreateBr - Create an unconditional 'br label X' instruction. + BranchInst *CreateBr(BasicBlock *Dest) { + return Insert(BranchInst::Create(Dest)); + } + + /// CreateCondBr - Create a conditional 'br Cond, TrueDest, FalseDest' + /// instruction. + BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False) { + return Insert(BranchInst::Create(True, False, Cond)); + } + + /// CreateSwitch - Create a switch instruction with the specified value, + /// default dest, and with a hint for the number of cases that will be added + /// (for efficient allocation). + SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10) { + return Insert(SwitchInst::Create(V, Dest, NumCases)); + } + + /// CreateInvoke - Create an invoke instruction. 
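A small sketch of wiring up control flow with the terminator helpers above (the basic blocks and the i1 condition are assumed to be created elsewhere):

using namespace llvm;

// Sketch: emit "if (Cond) goto Then; else goto Else;" and have both arms
// fall through to Merge.
void emitDiamond(IRBuilder<> &Builder, Value *Cond,
                 BasicBlock *Then, BasicBlock *Else, BasicBlock *Merge) {
  Builder.CreateCondBr(Cond, Then, Else);   // conditional terminator
  Builder.SetInsertPoint(Then);
  Builder.CreateBr(Merge);                  // unconditional branch
  Builder.SetInsertPoint(Else);
  Builder.CreateBr(Merge);
}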
+ template<typename InputIterator> + InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest, + BasicBlock *UnwindDest, InputIterator ArgBegin, + InputIterator ArgEnd, const char *Name = "") { + return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, + ArgBegin, ArgEnd), Name); + } + + UnwindInst *CreateUnwind() { + return Insert(new UnwindInst()); + } + + UnreachableInst *CreateUnreachable() { + return Insert(new UnreachableInst()); + } + + //===--------------------------------------------------------------------===// + // Instruction creation methods: Binary Operators + //===--------------------------------------------------------------------===// + + Value *CreateAdd(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateAdd(LC, RC); + return Insert(BinaryOperator::CreateAdd(LHS, RHS), Name); + } + Value *CreateSub(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateSub(LC, RC); + return Insert(BinaryOperator::CreateSub(LHS, RHS), Name); + } + Value *CreateMul(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateMul(LC, RC); + return Insert(BinaryOperator::CreateMul(LHS, RHS), Name); + } + Value *CreateUDiv(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateUDiv(LC, RC); + return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); + } + Value *CreateSDiv(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateSDiv(LC, RC); + return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name); + } + Value *CreateFDiv(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateFDiv(LC, RC); + return Insert(BinaryOperator::CreateFDiv(LHS, RHS), Name); + } + Value *CreateURem(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateURem(LC, RC); + return Insert(BinaryOperator::CreateURem(LHS, RHS), Name); + } + Value *CreateSRem(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateSRem(LC, RC); + return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name); + } + Value *CreateFRem(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateFRem(LC, RC); + return Insert(BinaryOperator::CreateFRem(LHS, RHS), Name); + } + Value *CreateShl(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateShl(LC, RC); + return Insert(BinaryOperator::CreateShl(LHS, RHS), Name); + } + Value *CreateLShr(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateLShr(LC, RC); + return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name); + } + Value 
*CreateAShr(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateAShr(LC, RC); + return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name); + } + Value *CreateAnd(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateAnd(LC, RC); + return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name); + } + Value *CreateOr(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateOr(LC, RC); + return Insert(BinaryOperator::CreateOr(LHS, RHS), Name); + } + Value *CreateXor(Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateXor(LC, RC); + return Insert(BinaryOperator::CreateXor(LHS, RHS), Name); + } + + Value *CreateBinOp(Instruction::BinaryOps Opc, + Value *LHS, Value *RHS, const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateBinOp(Opc, LC, RC); + return Insert(BinaryOperator::Create(Opc, LHS, RHS), Name); + } + + Value *CreateNeg(Value *V, const char *Name = "") { + if (Constant *VC = dyn_cast<Constant>(V)) + return Folder.CreateNeg(VC); + return Insert(BinaryOperator::CreateNeg(V), Name); + } + Value *CreateNot(Value *V, const char *Name = "") { + if (Constant *VC = dyn_cast<Constant>(V)) + return Folder.CreateNot(VC); + return Insert(BinaryOperator::CreateNot(V), Name); + } + + //===--------------------------------------------------------------------===// + // Instruction creation methods: Memory Instructions + //===--------------------------------------------------------------------===// + + MallocInst *CreateMalloc(const Type *Ty, Value *ArraySize = 0, + const char *Name = "") { + return Insert(new MallocInst(Ty, ArraySize), Name); + } + AllocaInst *CreateAlloca(const Type *Ty, Value *ArraySize = 0, + const char *Name = "") { + return Insert(new AllocaInst(Ty, ArraySize), Name); + } + FreeInst *CreateFree(Value *Ptr) { + return Insert(new FreeInst(Ptr)); + } + LoadInst *CreateLoad(Value *Ptr, const char *Name = "") { + return Insert(new LoadInst(Ptr), Name); + } + LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const char *Name = "") { + return Insert(new LoadInst(Ptr, 0, isVolatile), Name); + } + StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) { + return Insert(new StoreInst(Val, Ptr, isVolatile)); + } + template<typename InputIterator> + Value *CreateGEP(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd, + const char *Name = "") { + if (Constant *PC = dyn_cast<Constant>(Ptr)) { + // Every index must be constant. 
+ InputIterator i; + for (i = IdxBegin; i < IdxEnd; ++i) { + if (!dyn_cast<Constant>(*i)) + break; + } + if (i == IdxEnd) + return Folder.CreateGetElementPtr(PC, &IdxBegin[0], IdxEnd - IdxBegin); + } + return Insert(GetElementPtrInst::Create(Ptr, IdxBegin, IdxEnd), Name); + } + Value *CreateGEP(Value *Ptr, Value *Idx, const char *Name = "") { + if (Constant *PC = dyn_cast<Constant>(Ptr)) + if (Constant *IC = dyn_cast<Constant>(Idx)) + return Folder.CreateGetElementPtr(PC, &IC, 1); + return Insert(GetElementPtrInst::Create(Ptr, Idx), Name); + } + Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const char *Name = "") { + Value *Idx = ConstantInt::get(Type::Int32Ty, Idx0); + + if (Constant *PC = dyn_cast<Constant>(Ptr)) + return Folder.CreateGetElementPtr(PC, &Idx, 1); + + return Insert(GetElementPtrInst::Create(Ptr, &Idx, &Idx+1), Name); + } + Value *CreateConstGEP2_32(Value *Ptr, unsigned Idx0, unsigned Idx1, + const char *Name = "") { + Value *Idxs[] = { + ConstantInt::get(Type::Int32Ty, Idx0), + ConstantInt::get(Type::Int32Ty, Idx1) + }; + + if (Constant *PC = dyn_cast<Constant>(Ptr)) + return Folder.CreateGetElementPtr(PC, Idxs, 2); + + return Insert(GetElementPtrInst::Create(Ptr, Idxs, Idxs+2), Name); + } + Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const char *Name = "") { + Value *Idx = ConstantInt::get(Type::Int64Ty, Idx0); + + if (Constant *PC = dyn_cast<Constant>(Ptr)) + return Folder.CreateGetElementPtr(PC, &Idx, 1); + + return Insert(GetElementPtrInst::Create(Ptr, &Idx, &Idx+1), Name); + } + Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1, + const char *Name = "") { + Value *Idxs[] = { + ConstantInt::get(Type::Int64Ty, Idx0), + ConstantInt::get(Type::Int64Ty, Idx1) + }; + + if (Constant *PC = dyn_cast<Constant>(Ptr)) + return Folder.CreateGetElementPtr(PC, Idxs, 2); + + return Insert(GetElementPtrInst::Create(Ptr, Idxs, Idxs+2), Name); + } + Value *CreateStructGEP(Value *Ptr, unsigned Idx, const char *Name = "") { + return CreateConstGEP2_32(Ptr, 0, Idx, Name); + } + Value *CreateGlobalString(const char *Str = "", const char *Name = "") { + Constant *StrConstant = ConstantArray::get(Str, true); + GlobalVariable *gv = new GlobalVariable(StrConstant->getType(), + true, + GlobalValue::InternalLinkage, + StrConstant, + "", + BB->getParent()->getParent(), + false); + gv->setName(Name); + return gv; + } + Value *CreateGlobalStringPtr(const char *Str = "", const char *Name = "") { + Value *gv = CreateGlobalString(Str, Name); + Value *zero = ConstantInt::get(Type::Int32Ty, 0); + Value *Args[] = { zero, zero }; + return CreateGEP(gv, Args, Args+2, Name); + } + //===--------------------------------------------------------------------===// + // Instruction creation methods: Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + Value *CreateTrunc(Value *V, const Type *DestTy, const char *Name = "") { + return CreateCast(Instruction::Trunc, V, DestTy, Name); + } + Value *CreateZExt(Value *V, const Type *DestTy, const char *Name = "") { + return CreateCast(Instruction::ZExt, V, DestTy, Name); + } + Value *CreateSExt(Value *V, const Type *DestTy, const char *Name = "") { + return CreateCast(Instruction::SExt, V, DestTy, Name); + } + Value *CreateFPToUI(Value *V, const Type *DestTy, const char *Name = ""){ + return CreateCast(Instruction::FPToUI, V, DestTy, Name); + } + Value *CreateFPToSI(Value *V, const Type *DestTy, const char *Name = ""){ + return CreateCast(Instruction::FPToSI, V, DestTy, Name); + } + 
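A short sketch using the GEP helpers above (assuming Ptr points at a struct with at least two fields). Note that when Ptr is itself a Constant, the folder produces a constant expression rather than emitting an instruction:

using namespace llvm;

// Sketch: compute the address of field #1 of a pointed-to struct and load it.
Value *loadSecondField(IRBuilder<> &Builder, Value *Ptr) {
  Value *FieldAddr = Builder.CreateStructGEP(Ptr, 1, "field1.addr");
  return Builder.CreateLoad(FieldAddr, "field1");
}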
Value *CreateUIToFP(Value *V, const Type *DestTy, const char *Name = ""){ + return CreateCast(Instruction::UIToFP, V, DestTy, Name); + } + Value *CreateSIToFP(Value *V, const Type *DestTy, const char *Name = ""){ + return CreateCast(Instruction::SIToFP, V, DestTy, Name); + } + Value *CreateFPTrunc(Value *V, const Type *DestTy, + const char *Name = "") { + return CreateCast(Instruction::FPTrunc, V, DestTy, Name); + } + Value *CreateFPExt(Value *V, const Type *DestTy, const char *Name = "") { + return CreateCast(Instruction::FPExt, V, DestTy, Name); + } + Value *CreatePtrToInt(Value *V, const Type *DestTy, + const char *Name = "") { + return CreateCast(Instruction::PtrToInt, V, DestTy, Name); + } + Value *CreateIntToPtr(Value *V, const Type *DestTy, + const char *Name = "") { + return CreateCast(Instruction::IntToPtr, V, DestTy, Name); + } + Value *CreateBitCast(Value *V, const Type *DestTy, + const char *Name = "") { + return CreateCast(Instruction::BitCast, V, DestTy, Name); + } + + Value *CreateCast(Instruction::CastOps Op, Value *V, const Type *DestTy, + const char *Name = "") { + if (V->getType() == DestTy) + return V; + if (Constant *VC = dyn_cast<Constant>(V)) + return Folder.CreateCast(Op, VC, DestTy); + return Insert(CastInst::Create(Op, V, DestTy), Name); + } + Value *CreateIntCast(Value *V, const Type *DestTy, bool isSigned, + const char *Name = "") { + if (V->getType() == DestTy) + return V; + if (Constant *VC = dyn_cast<Constant>(V)) + return Folder.CreateIntCast(VC, DestTy, isSigned); + return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name); + } + + //===--------------------------------------------------------------------===// + // Instruction creation methods: Compare Instructions + //===--------------------------------------------------------------------===// + + Value *CreateICmpEQ(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name); + } + Value *CreateICmpNE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name); + } + Value *CreateICmpUGT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name); + } + Value *CreateICmpUGE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name); + } + Value *CreateICmpULT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name); + } + Value *CreateICmpULE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name); + } + Value *CreateICmpSGT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name); + } + Value *CreateICmpSGE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name); + } + Value *CreateICmpSLT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name); + } + Value *CreateICmpSLE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name); + } + + Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name); + } + Value *CreateFCmpOGT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name); + } + Value *CreateFCmpOGE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name); + } + 
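A sketch combining the cast and integer-compare helpers (assuming V is an integer value of arbitrary width and Bound32 is an existing i32 value). If V is already i32, CreateIntCast simply returns it unchanged, as the code above shows:

using namespace llvm;

// Sketch: widen or narrow V to i32, then do a signed less-than comparison.
Value *isLessThan(IRBuilder<> &Builder, Value *V, Value *Bound32) {
  Value *V32 = Builder.CreateIntCast(V, Type::Int32Ty, /*isSigned*/true);
  return Builder.CreateICmpSLT(V32, Bound32, "cmp");
}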
Value *CreateFCmpOLT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name); + } + Value *CreateFCmpOLE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name); + } + Value *CreateFCmpONE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name); + } + Value *CreateFCmpORD(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name); + } + Value *CreateFCmpUNO(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name); + } + Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name); + } + Value *CreateFCmpUGT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name); + } + Value *CreateFCmpUGE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name); + } + Value *CreateFCmpULT(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name); + } + Value *CreateFCmpULE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name); + } + Value *CreateFCmpUNE(Value *LHS, Value *RHS, const char *Name = "") { + return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name); + } + + Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, + const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateICmp(P, LC, RC); + return Insert(new ICmpInst(P, LHS, RHS), Name); + } + Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, + const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateFCmp(P, LC, RC); + return Insert(new FCmpInst(P, LHS, RHS), Name); + } + + Value *CreateVICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, + const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateVICmp(P, LC, RC); + return Insert(new VICmpInst(P, LHS, RHS), Name); + } + Value *CreateVFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, + const char *Name = "") { + if (Constant *LC = dyn_cast<Constant>(LHS)) + if (Constant *RC = dyn_cast<Constant>(RHS)) + return Folder.CreateVFCmp(P, LC, RC); + return Insert(new VFCmpInst(P, LHS, RHS), Name); + } + + //===--------------------------------------------------------------------===// + // Instruction creation methods: Other Instructions + //===--------------------------------------------------------------------===// + + PHINode *CreatePHI(const Type *Ty, const char *Name = "") { + return Insert(PHINode::Create(Ty), Name); + } + + CallInst *CreateCall(Value *Callee, const char *Name = "") { + return Insert(CallInst::Create(Callee), Name); + } + CallInst *CreateCall(Value *Callee, Value *Arg, const char *Name = "") { + return Insert(CallInst::Create(Callee, Arg), Name); + } + CallInst *CreateCall2(Value *Callee, Value *Arg1, Value *Arg2, + const char *Name = "") { + Value *Args[] = { Arg1, Arg2 }; + return Insert(CallInst::Create(Callee, Args, Args+2), Name); + } + CallInst *CreateCall3(Value *Callee, Value *Arg1, Value *Arg2, Value *Arg3, + const char *Name = "") { + Value *Args[] = { Arg1, Arg2, Arg3 }; + return Insert(CallInst::Create(Callee, Args, 
Args+3), Name); + } + CallInst *CreateCall4(Value *Callee, Value *Arg1, Value *Arg2, Value *Arg3, + Value *Arg4, const char *Name = "") { + Value *Args[] = { Arg1, Arg2, Arg3, Arg4 }; + return Insert(CallInst::Create(Callee, Args, Args+4), Name); + } + + template<typename InputIterator> + CallInst *CreateCall(Value *Callee, InputIterator ArgBegin, + InputIterator ArgEnd, const char *Name = "") { + return Insert(CallInst::Create(Callee, ArgBegin, ArgEnd), Name); + } + + Value *CreateSelect(Value *C, Value *True, Value *False, + const char *Name = "") { + if (Constant *CC = dyn_cast<Constant>(C)) + if (Constant *TC = dyn_cast<Constant>(True)) + if (Constant *FC = dyn_cast<Constant>(False)) + return Folder.CreateSelect(CC, TC, FC); + return Insert(SelectInst::Create(C, True, False), Name); + } + + VAArgInst *CreateVAArg(Value *List, const Type *Ty, const char *Name = "") { + return Insert(new VAArgInst(List, Ty), Name); + } + + Value *CreateExtractElement(Value *Vec, Value *Idx, + const char *Name = "") { + if (Constant *VC = dyn_cast<Constant>(Vec)) + if (Constant *IC = dyn_cast<Constant>(Idx)) + return Folder.CreateExtractElement(VC, IC); + return Insert(new ExtractElementInst(Vec, Idx), Name); + } + + Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, + const char *Name = "") { + if (Constant *VC = dyn_cast<Constant>(Vec)) + if (Constant *NC = dyn_cast<Constant>(NewElt)) + if (Constant *IC = dyn_cast<Constant>(Idx)) + return Folder.CreateInsertElement(VC, NC, IC); + return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name); + } + + Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask, + const char *Name = "") { + if (Constant *V1C = dyn_cast<Constant>(V1)) + if (Constant *V2C = dyn_cast<Constant>(V2)) + if (Constant *MC = dyn_cast<Constant>(Mask)) + return Folder.CreateShuffleVector(V1C, V2C, MC); + return Insert(new ShuffleVectorInst(V1, V2, Mask), Name); + } + + Value *CreateExtractValue(Value *Agg, unsigned Idx, + const char *Name = "") { + if (Constant *AggC = dyn_cast<Constant>(Agg)) + return Folder.CreateExtractValue(AggC, &Idx, 1); + return Insert(ExtractValueInst::Create(Agg, Idx), Name); + } + + template<typename InputIterator> + Value *CreateExtractValue(Value *Agg, + InputIterator IdxBegin, + InputIterator IdxEnd, + const char *Name = "") { + if (Constant *AggC = dyn_cast<Constant>(Agg)) + return Folder.CreateExtractValue(AggC, IdxBegin, IdxEnd - IdxBegin); + return Insert(ExtractValueInst::Create(Agg, IdxBegin, IdxEnd), Name); + } + + Value *CreateInsertValue(Value *Agg, Value *Val, unsigned Idx, + const char *Name = "") { + if (Constant *AggC = dyn_cast<Constant>(Agg)) + if (Constant *ValC = dyn_cast<Constant>(Val)) + return Folder.CreateInsertValue(AggC, ValC, &Idx, 1); + return Insert(InsertValueInst::Create(Agg, Val, Idx), Name); + } + + template<typename InputIterator> + Value *CreateInsertValue(Value *Agg, Value *Val, + InputIterator IdxBegin, + InputIterator IdxEnd, + const char *Name = "") { + if (Constant *AggC = dyn_cast<Constant>(Agg)) + if (Constant *ValC = dyn_cast<Constant>(Val)) + return Folder.CreateInsertValue(AggC, ValC, + IdxBegin, IdxEnd - IdxBegin); + return Insert(InsertValueInst::Create(Agg, Val, IdxBegin, IdxEnd), Name); + } + + //===--------------------------------------------------------------------===// + // Utility creation methods + //===--------------------------------------------------------------------===// + + /// CreateIsNull - Return an i1 value testing if \arg Arg is null. 
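A sketch using the call and select helpers above (Callee, the two arguments, the fallback value, and the i1 UseDefault flag are all assumed to exist):

using namespace llvm;

// Sketch: call a two-argument function, then pick either its result or a
// fallback value depending on an i1 flag.
Value *callOrDefault(IRBuilder<> &Builder, Value *Callee, Value *A, Value *B,
                     Value *UseDefault, Value *Default) {
  CallInst *CI = Builder.CreateCall2(Callee, A, B, "call");
  return Builder.CreateSelect(UseDefault, Default, CI, "result");
}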
+ Value *CreateIsNull(Value *Arg, const char *Name = "") { + return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), + Name); + } + + /// CreateIsNotNull - Return an i1 value testing if \arg Arg is not null. + Value *CreateIsNotNull(Value *Arg, const char *Name = "") { + return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), + Name); + } + + /// CreatePtrDiff - Return the i64 difference between two pointer values, + /// dividing out the size of the pointed-to objects. This is intended to + /// implement C-style pointer subtraction. + Value *CreatePtrDiff(Value *LHS, Value *RHS, const char *Name = "") { + assert(LHS->getType() == RHS->getType() && + "Pointer subtraction operand types must match!"); + const PointerType *ArgType = cast<PointerType>(LHS->getType()); + Value *LHS_int = CreatePtrToInt(LHS, Type::Int64Ty); + Value *RHS_int = CreatePtrToInt(RHS, Type::Int64Ty); + Value *Difference = CreateSub(LHS_int, RHS_int); + return CreateSDiv(Difference, + ConstantExpr::getSizeOf(ArgType->getElementType()), + Name); + } +}; + +} + +#endif diff --git a/include/llvm/Support/InstIterator.h b/include/llvm/Support/InstIterator.h new file mode 100644 index 0000000..7d3f883 --- /dev/null +++ b/include/llvm/Support/InstIterator.h @@ -0,0 +1,147 @@ +//===- llvm/Support/InstIterator.h - Classes for inst iteration -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains definitions of two iterators for iterating over the +// instructions in a function. This is effectively a wrapper around a two level +// iterator that can probably be genericized later. +// +// Note that this iterator gets invalidated any time that basic blocks or +// instructions are moved around. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_INSTITERATOR_H +#define LLVM_SUPPORT_INSTITERATOR_H + +#include "llvm/BasicBlock.h" +#include "llvm/Function.h" + +namespace llvm { + +// This class implements inst_begin() & inst_end() for +// inst_iterator and const_inst_iterator's. +// +template <class _BB_t, class _BB_i_t, class _BI_t, class _II_t> +class InstIterator { + typedef _BB_t BBty; + typedef _BB_i_t BBIty; + typedef _BI_t BIty; + typedef _II_t IIty; + _BB_t *BBs; // BasicBlocksType + _BB_i_t BB; // BasicBlocksType::iterator + _BI_t BI; // BasicBlock::iterator +public: + typedef std::bidirectional_iterator_tag iterator_category; + typedef IIty value_type; + typedef signed difference_type; + typedef IIty* pointer; + typedef IIty& reference; + + // Default constructor + InstIterator() {} + + // Copy constructor... + template<typename A, typename B, typename C, typename D> + InstIterator(const InstIterator<A,B,C,D> &II) + : BBs(II.BBs), BB(II.BB), BI(II.BI) {} + + template<typename A, typename B, typename C, typename D> + InstIterator(InstIterator<A,B,C,D> &II) + : BBs(II.BBs), BB(II.BB), BI(II.BI) {} + + template<class M> InstIterator(M &m) + : BBs(&m.getBasicBlockList()), BB(BBs->begin()) { // begin ctor + if (BB != BBs->end()) { + BI = BB->begin(); + advanceToNextBB(); + } + } + + template<class M> InstIterator(M &m, bool) + : BBs(&m.getBasicBlockList()), BB(BBs->end()) { // end ctor + } + + // Accessors to get at the underlying iterators... 
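A usage sketch for this iterator, relying on the inst_begin/inst_end helpers declared at the end of this header:

#include "llvm/Support/InstIterator.h"
using namespace llvm;

// Sketch: count every instruction in a function, regardless of which basic
// block it lives in.
unsigned countInstructions(Function &F) {
  unsigned N = 0;
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    ++N;                       // *I is the current Instruction
  return N;
}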
+ inline BBIty &getBasicBlockIterator() { return BB; } + inline BIty &getInstructionIterator() { return BI; } + + inline reference operator*() const { return *BI; } + inline pointer operator->() const { return &operator*(); } + + inline bool operator==(const InstIterator &y) const { + return BB == y.BB && (BB == BBs->end() || BI == y.BI); + } + inline bool operator!=(const InstIterator& y) const { + return !operator==(y); + } + + InstIterator& operator++() { + ++BI; + advanceToNextBB(); + return *this; + } + inline InstIterator operator++(int) { + InstIterator tmp = *this; ++*this; return tmp; + } + + InstIterator& operator--() { + while (BB == BBs->end() || BI == BB->begin()) { + --BB; + BI = BB->end(); + } + --BI; + return *this; + } + inline InstIterator operator--(int) { + InstIterator tmp = *this; --*this; return tmp; + } + + inline bool atEnd() const { return BB == BBs->end(); } + +private: + inline void advanceToNextBB() { + // The only way that the II could be broken is if it is now pointing to + // the end() of the current BasicBlock and there are successor BBs. + while (BI == BB->end()) { + ++BB; + if (BB == BBs->end()) break; + BI = BB->begin(); + } + } +}; + + +typedef InstIterator<iplist<BasicBlock>, + Function::iterator, BasicBlock::iterator, + Instruction> inst_iterator; +typedef InstIterator<const iplist<BasicBlock>, + Function::const_iterator, + BasicBlock::const_iterator, + const Instruction> const_inst_iterator; + +inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); } +inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); } +inline const_inst_iterator inst_begin(const Function *F) { + return const_inst_iterator(*F); +} +inline const_inst_iterator inst_end(const Function *F) { + return const_inst_iterator(*F, true); +} +inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); } +inline inst_iterator inst_end(Function &F) { return inst_iterator(F, true); } +inline const_inst_iterator inst_begin(const Function &F) { + return const_inst_iterator(F); +} +inline const_inst_iterator inst_end(const Function &F) { + return const_inst_iterator(F, true); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/InstVisitor.h b/include/llvm/Support/InstVisitor.h new file mode 100644 index 0000000..597cc9d --- /dev/null +++ b/include/llvm/Support/InstVisitor.h @@ -0,0 +1,221 @@ +//===- llvm/Support/InstVisitor.h - Define instruction visitors -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + +#ifndef LLVM_SUPPORT_INSTVISITOR_H +#define LLVM_SUPPORT_INSTVISITOR_H + +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/Module.h" + +namespace llvm { + +// We operate on opaque instruction classes, so forward declare all instruction +// types now... +// +#define HANDLE_INST(NUM, OPCODE, CLASS) class CLASS; +#include "llvm/Instruction.def" + +#define DELEGATE(CLASS_TO_VISIT) \ + return static_cast<SubClass*>(this)-> \ + visit##CLASS_TO_VISIT(static_cast<CLASS_TO_VISIT&>(I)) + + +/// @brief Base class for instruction visitors +/// +/// Instruction visitors are used when you want to perform different action for +/// different kinds of instruction without without having to use lots of casts +/// and a big switch statement (in your code that is). 
+///
+/// To define your own visitor, inherit from this class, specifying your
+/// new type for the 'SubClass' template parameter, and "override" visitXXX
+/// functions in your class. I say "overriding" because this class is defined
+/// in terms of statically resolved overloading, not virtual functions.
+///
+/// For example, here is a visitor that counts the number of malloc
+/// instructions processed:
+///
+///  /// Declare the class.  Note that we derive from InstVisitor instantiated
+///  /// with _our new subclasses_ type.
+///  ///
+///  struct CountMallocVisitor : public InstVisitor<CountMallocVisitor> {
+///    unsigned Count;
+///    CountMallocVisitor() : Count(0) {}
+///
+///    void visitMallocInst(MallocInst &MI) { ++Count; }
+///  };
+///
+///  And this class would be used like this:
+///    CountMallocVisitor CMV;
+///    CMV.visit(function);
+///    NumMallocs = CMV.Count;
+///
+/// The defined class has 'visit' methods for Instruction, and also for
+/// BasicBlock, Function, and Module, which recursively process all contained
+/// instructions.
+///
+/// Note that if you don't implement visitXXX for some instruction type,
+/// the visitXXX method for the instruction superclass will be invoked. So
+/// if instructions are added in the future, they will be automatically
+/// supported, provided you handle one of their superclasses.
+///
+/// The optional second template argument specifies the type that instruction
+/// visitation functions should return. If you specify this, you *MUST*
+/// provide an implementation of visitInstruction, though!
+///
+/// Note that this class is specifically designed as a template to avoid
+/// virtual function call overhead.  Defining and using an InstVisitor is just
+/// as efficient as having your own switch statement over the instruction
+/// opcode.
+template<typename SubClass, typename RetTy=void>
+class InstVisitor {
+  //===--------------------------------------------------------------------===//
+  // Interface code - This is the public interface of the InstVisitor that you
+  // use to visit instructions...
+  //
+
+public:
+  // Generic visit method - Allow visitation to all instructions in a range
+  template<class Iterator>
+  void visit(Iterator Start, Iterator End) {
+    while (Start != End)
+      static_cast<SubClass*>(this)->visit(*Start++);
+  }
+
+  // Define visitors for functions and basic blocks...
+  //
+  void visit(Module &M) {
+    static_cast<SubClass*>(this)->visitModule(M);
+    visit(M.begin(), M.end());
+  }
+  void visit(Function &F) {
+    static_cast<SubClass*>(this)->visitFunction(F);
+    visit(F.begin(), F.end());
+  }
+  void visit(BasicBlock &BB) {
+    static_cast<SubClass*>(this)->visitBasicBlock(BB);
+    visit(BB.begin(), BB.end());
+  }
+
+  // Forwarding functions so that the user can visit with pointers AND refs.
+  void visit(Module *M)       { visit(*M); }
+  void visit(Function *F)     { visit(*F); }
+  void visit(BasicBlock *BB)  { visit(*BB); }
+  RetTy visit(Instruction *I) { return visit(*I); }
+
+  // visit - Finally, code to visit an instruction...
+  //
+  RetTy visit(Instruction &I) {
+    switch (I.getOpcode()) {
+    default: assert(0 && "Unknown instruction type encountered!");
+             abort();
+      // Build the switch statement using the Instruction.def file...
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+    case Instruction::OPCODE: return \
+           static_cast<SubClass*>(this)-> \
+                      visit##OPCODE(static_cast<CLASS&>(I));
+#include "llvm/Instruction.def"
+    }
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Visitation functions... 
these functions provide default fallbacks in case + // the user does not specify what to do for a particular instruction type. + // The default behavior is to generalize the instruction type to its subtype + // and try visiting the subtype. All of this should be inlined perfectly, + // because there are no virtual functions to get in the way. + // + + // When visiting a module, function or basic block directly, these methods get + // called to indicate when transitioning into a new unit. + // + void visitModule (Module &M) {} + void visitFunction (Function &F) {} + void visitBasicBlock(BasicBlock &BB) {} + + // Define instruction specific visitor functions that can be overridden to + // handle SPECIFIC instructions. These functions automatically define + // visitMul to proxy to visitBinaryOperator for instance in case the user does + // not need this generality. + // + // The one problem case we have to handle here though is that the PHINode + // class and opcode name are the exact same. Because of this, we cannot + // define visitPHINode (the inst version) to forward to visitPHINode (the + // generic version) without multiply defined symbols and recursion. To handle + // this, we do not autoexpand "Other" instructions, we do it manually. + // +#define HANDLE_INST(NUM, OPCODE, CLASS) \ + RetTy visit##OPCODE(CLASS &I) { DELEGATE(CLASS); } +#include "llvm/Instruction.def" + + // Specific Instruction type classes... note that all of the casts are + // necessary because we use the instruction classes as opaque types... + // + RetTy visitReturnInst(ReturnInst &I) { DELEGATE(TerminatorInst);} + RetTy visitBranchInst(BranchInst &I) { DELEGATE(TerminatorInst);} + RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);} + RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(TerminatorInst);} + RetTy visitUnwindInst(UnwindInst &I) { DELEGATE(TerminatorInst);} + RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);} + RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);} + RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);} + RetTy visitVICmpInst(VICmpInst &I) { DELEGATE(CmpInst);} + RetTy visitVFCmpInst(VFCmpInst &I) { DELEGATE(CmpInst);} + RetTy visitMallocInst(MallocInst &I) { DELEGATE(AllocationInst);} + RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(AllocationInst);} + RetTy visitFreeInst(FreeInst &I) { DELEGATE(Instruction); } + RetTy visitLoadInst(LoadInst &I) { DELEGATE(Instruction); } + RetTy visitStoreInst(StoreInst &I) { DELEGATE(Instruction); } + RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction); } + RetTy visitPHINode(PHINode &I) { DELEGATE(Instruction); } + RetTy visitTruncInst(TruncInst &I) { DELEGATE(CastInst); } + RetTy visitZExtInst(ZExtInst &I) { DELEGATE(CastInst); } + RetTy visitSExtInst(SExtInst &I) { DELEGATE(CastInst); } + RetTy visitFPTruncInst(FPTruncInst &I) { DELEGATE(CastInst); } + RetTy visitFPExtInst(FPExtInst &I) { DELEGATE(CastInst); } + RetTy visitFPToUIInst(FPToUIInst &I) { DELEGATE(CastInst); } + RetTy visitFPToSIInst(FPToSIInst &I) { DELEGATE(CastInst); } + RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst); } + RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst); } + RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst); } + RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst); } + RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst); } + RetTy visitSelectInst(SelectInst &I) { DELEGATE(Instruction); } + RetTy visitCallInst(CallInst &I) { DELEGATE(Instruction); } + RetTy 
visitVAArgInst(VAArgInst &I) { DELEGATE(Instruction); } + RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);} + RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction); } + RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction); } + RetTy visitExtractValueInst(ExtractValueInst &I) { DELEGATE(Instruction);} + RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); } + + // Next level propagators... if the user does not overload a specific + // instruction type, they can overload one of these to get the whole class + // of instructions... + // + RetTy visitTerminatorInst(TerminatorInst &I) { DELEGATE(Instruction); } + RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction); } + RetTy visitAllocationInst(AllocationInst &I) { DELEGATE(Instruction); } + RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction); } + RetTy visitCastInst(CastInst &I) { DELEGATE(Instruction); } + + // If the user wants a 'default' case, they can choose to override this + // function. If this function is not overloaded in the users subclass, then + // this instruction just gets ignored. + // + // Note that you MUST override this function if your return type is not void. + // + void visitInstruction(Instruction &I) {} // Ignore unhandled instructions +}; + +#undef DELEGATE + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/LeakDetector.h b/include/llvm/Support/LeakDetector.h new file mode 100644 index 0000000..8d74ac6 --- /dev/null +++ b/include/llvm/Support/LeakDetector.h @@ -0,0 +1,91 @@ +//===-- llvm/Support/LeakDetector.h - Provide leak detection ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a class that can be used to provide very simple memory leak +// checks for an API. Basically LLVM uses this to make sure that Instructions, +// for example, are deleted when they are supposed to be, and not leaked away. +// +// When compiling with NDEBUG (Release build), this class does nothing, thus +// adding no checking overhead to release builds. Note that this class is +// implemented in a very simple way, requiring completely manual manipulation +// and checking for garbage, but this is intentional: users should not be using +// this API, only other APIs should. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_LEAKDETECTOR_H +#define LLVM_SUPPORT_LEAKDETECTOR_H + +#include <string> + +namespace llvm { + +class Value; + +struct LeakDetector { + /// addGarbageObject - Add a pointer to the internal set of "garbage" object + /// pointers. This should be called when objects are created, or if they are + /// taken out of an owning collection. + /// + static void addGarbageObject(void *Object) { +#ifndef NDEBUG + addGarbageObjectImpl(Object); +#endif + } + + /// removeGarbageObject - Remove a pointer from our internal representation of + /// our "garbage" objects. This should be called when an object is added to + /// an "owning" collection. + /// + static void removeGarbageObject(void *Object) { +#ifndef NDEBUG + removeGarbageObjectImpl(Object); +#endif + } + + /// checkForGarbage - Traverse the internal representation of garbage + /// pointers. 
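A sketch of the intended calling pattern (normally exercised inside LLVM itself rather than by API clients; the message string is illustrative):

#include "llvm/Support/LeakDetector.h"
using namespace llvm;

// Sketch: an object is "garbage" from creation until an owning container
// adopts it; anything still registered when the check runs gets reported.
void trackLifetime(Value *V) {
  LeakDetector::addGarbageObject(V);       // created, not yet owned
  // ... later, once V has been inserted into its parent ...
  LeakDetector::removeGarbageObject(V);    // now owned, no longer tracked
  LeakDetector::checkForGarbage("after inserting V");
}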
If there are any pointers that have been add'ed, but not + /// remove'd, big obnoxious warnings about memory leaks are issued. + /// + /// The specified message will be printed indicating when the check was + /// performed. + /// + static void checkForGarbage(const std::string &Message) { +#ifndef NDEBUG + checkForGarbageImpl(Message); +#endif + } + + /// Overload the normal methods to work better with Value*'s because they are + /// by far the most common in LLVM. This does not affect the actual + /// functioning of this class, it just makes the warning messages nicer. + /// + static void addGarbageObject(const Value *Object) { +#ifndef NDEBUG + addGarbageObjectImpl(Object); +#endif + } + static void removeGarbageObject(const Value *Object) { +#ifndef NDEBUG + removeGarbageObjectImpl(Object); +#endif + } + +private: + // If we are debugging, the actual implementations will be called... + static void addGarbageObjectImpl(const Value *Object); + static void removeGarbageObjectImpl(const Value *Object); + static void addGarbageObjectImpl(void *Object); + static void removeGarbageObjectImpl(void *Object); + static void checkForGarbageImpl(const std::string &Message); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/ManagedStatic.h b/include/llvm/Support/ManagedStatic.h new file mode 100644 index 0000000..619cc20 --- /dev/null +++ b/include/llvm/Support/ManagedStatic.h @@ -0,0 +1,120 @@ +//===-- llvm/Support/ManagedStatic.h - Static Global wrapper ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ManagedStatic class and the llvm_shutdown() function. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_MANAGED_STATIC_H +#define LLVM_SUPPORT_MANAGED_STATIC_H + +#include "llvm/System/Atomic.h" + +namespace llvm { + +/// object_creator - Helper method for ManagedStatic. +template<class C> +void* object_creator() { + return new C(); +} + +/// object_deleter - Helper method for ManagedStatic. +/// +template<class C> +void object_deleter(void *Ptr) { + delete (C*)Ptr; +} + +/// ManagedStaticBase - Common base class for ManagedStatic instances. +class ManagedStaticBase { +protected: + // This should only be used as a static variable, which guarantees that this + // will be zero initialized. + mutable void *Ptr; + mutable void (*DeleterFn)(void*); + mutable const ManagedStaticBase *Next; + + void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const; +public: + /// isConstructed - Return true if this object has not been created yet. + bool isConstructed() const { return Ptr != 0; } + + void destroy() const; +}; + +/// ManagedStatic - This transparently changes the behavior of global statics to +/// be lazily constructed on demand (good for reducing startup times of dynamic +/// libraries that link in LLVM components) and for making destruction be +/// explicit through the llvm_shutdown() function call. +/// +template<class C> +class ManagedStatic : public ManagedStaticBase { +public: + + // Accessors. 
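A usage sketch for ManagedStatic (the payload type and names are illustrative; it relies on the accessors and the llvm_shutdown_obj helper declared just below):

#include "llvm/Support/ManagedStatic.h"
#include <vector>
using namespace llvm;

static ManagedStatic<std::vector<int> > GlobalInts;  // built on first use

void recordValue(int X) {
  GlobalInts->push_back(X);        // first access constructs the vector
}

int main() {
  llvm_shutdown_obj Shutdown;      // runs llvm_shutdown() on scope exit
  recordValue(42);
  return 0;
}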
+ C &operator*() { + void* tmp = Ptr; + sys::MemoryFence(); + if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>); + + return *static_cast<C*>(Ptr); + } + C *operator->() { + void* tmp = Ptr; + sys::MemoryFence(); + if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>); + + return static_cast<C*>(Ptr); + } + const C &operator*() const { + void* tmp = Ptr; + sys::MemoryFence(); + if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>); + + return *static_cast<C*>(Ptr); + } + const C *operator->() const { + void* tmp = Ptr; + sys::MemoryFence(); + if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>); + + return static_cast<C*>(Ptr); + } +}; + +template<void (*CleanupFn)(void*)> +class ManagedCleanup : public ManagedStaticBase { +public: + void Register() { RegisterManagedStatic(0, CleanupFn); } +}; + + +/// llvm_start_multithreaded - Allocate and initialize structures needed to +/// make LLVM safe for multithreading. The return value indicates whether +/// multithreaded initialization succeeded. LLVM will still be operational +/// on "failed" return, but will not be safe to run multithreaded. +bool llvm_start_multithreaded(); + +/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables. +void llvm_shutdown(); + + +/// llvm_shutdown_obj - This is a simple helper class that calls +/// llvm_shutdown() when it is destroyed. +struct llvm_shutdown_obj { + llvm_shutdown_obj() { } + explicit llvm_shutdown_obj(bool multithreaded) { + if (multithreaded) llvm_start_multithreaded(); + } + ~llvm_shutdown_obj() { llvm_shutdown(); } +}; + +} + +#endif diff --git a/include/llvm/Support/Mangler.h b/include/llvm/Support/Mangler.h new file mode 100644 index 0000000..8f672bd --- /dev/null +++ b/include/llvm/Support/Mangler.h @@ -0,0 +1,112 @@ +//===-- llvm/Support/Mangler.h - Self-contained name mangler ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Unified name mangler for various backends. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_MANGLER_H +#define LLVM_SUPPORT_MANGLER_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include <string> + +namespace llvm { +class Type; +class Module; +class Value; +class GlobalValue; + +class Mangler { + /// Prefix - This string is added to each symbol that is emitted, unless the + /// symbol is marked as not needing this prefix. + const char *Prefix; + + /// PrivatePrefix - This string is emitted before each symbol with private + /// linkage. + const char *PrivatePrefix; + + /// UseQuotes - If this is set, the target accepts global names in quotes, + /// e.g. "foo bar" is a legal name. This syntax is used instead of escaping + /// the space character. By default, this is false. + bool UseQuotes; + + /// PreserveAsmNames - If this is set, the asm escape character is not removed + /// from names with 'asm' specifiers. + bool PreserveAsmNames; + + /// Memo - This is used to remember the name that we assign a value. + /// + DenseMap<const Value*, std::string> Memo; + + /// Count - This simple counter is used to unique value names. 
+ /// + unsigned Count; + + /// TypeMap - If the client wants us to unique types, this keeps track of the + /// current assignments and TypeCounter keeps track of the next id to assign. + DenseMap<const Type*, unsigned> TypeMap; + unsigned TypeCounter; + + /// AcceptableChars - This bitfield contains a one for each character that is + /// allowed to be part of an unmangled name. + unsigned AcceptableChars[256/32]; +public: + + // Mangler ctor - if a prefix is specified, it will be prepended onto all + // symbols. + Mangler(Module &M, const char *Prefix = "", const char *privatePrefix = ""); + + /// setUseQuotes - If UseQuotes is set to true, this target accepts quoted + /// strings for assembler labels. + void setUseQuotes(bool Val) { UseQuotes = Val; } + + /// setPreserveAsmNames - If the mangler should not strip off the asm name + /// @verbatim identifier (\001), this should be set. @endverbatim + void setPreserveAsmNames(bool Val) { PreserveAsmNames = Val; } + + /// Acceptable Characters - This allows the target to specify which characters + /// are acceptable to the assembler without being mangled. By default we + /// allow letters, numbers, '_', '$', and '.', which is what GAS accepts. + void markCharAcceptable(unsigned char X) { + AcceptableChars[X/32] |= 1 << (X&31); + } + void markCharUnacceptable(unsigned char X) { + AcceptableChars[X/32] &= ~(1 << (X&31)); + } + bool isCharAcceptable(unsigned char X) const { + return (AcceptableChars[X/32] & (1 << (X&31))) != 0; + } + + /// getValueName - Returns the mangled name of V, an LLVM Value, + /// in the current module. + /// + std::string getValueName(const GlobalValue *V, const char *Suffix = ""); + std::string getValueName(const Value *V); + + /// makeNameProper - We don't want identifier names with ., space, or + /// - in them, so we mangle these characters into the strings "d_", + /// "s_", and "D_", respectively. This is a very simple mangling that + /// doesn't guarantee unique names for values. getValueName already + /// does this for you, so there's no point calling it on the result + /// from getValueName. + /// + std::string makeNameProper(const std::string &x, const char *Prefix = 0, + const char *PrivatePrefix = 0); + +private: + /// getTypeID - Return a unique ID for the specified LLVM type. + /// + unsigned getTypeID(const Type *Ty); +}; + +} // End llvm namespace + +#endif // LLVM_SUPPORT_MANGLER_H diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h new file mode 100644 index 0000000..85e19ac --- /dev/null +++ b/include/llvm/Support/MathExtras.h @@ -0,0 +1,437 @@ +//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains some functions that are useful for math stuff. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_MATHEXTRAS_H +#define LLVM_SUPPORT_MATHEXTRAS_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +// NOTE: The following support functions use the _32/_64 extensions instead of +// type overloading so that signed and unsigned integers can be used without +// ambiguity. + +/// Hi_32 - This function returns the high 32 bits of a 64 bit value. 
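/// Ex. Hi_32(0x1122334455667788ULL) == 0x11223344U.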
+inline uint32_t Hi_32(uint64_t Value) { + return static_cast<uint32_t>(Value >> 32); +} + +/// Lo_32 - This function returns the low 32 bits of a 64 bit value. +inline uint32_t Lo_32(uint64_t Value) { + return static_cast<uint32_t>(Value); +} + +/// is?Type - these functions produce optimal testing for integer data types. +inline bool isInt8 (int64_t Value) { + return static_cast<int8_t>(Value) == Value; +} +inline bool isUInt8 (int64_t Value) { + return static_cast<uint8_t>(Value) == Value; +} +inline bool isInt16 (int64_t Value) { + return static_cast<int16_t>(Value) == Value; +} +inline bool isUInt16(int64_t Value) { + return static_cast<uint16_t>(Value) == Value; +} +inline bool isInt32 (int64_t Value) { + return static_cast<int32_t>(Value) == Value; +} +inline bool isUInt32(int64_t Value) { + return static_cast<uint32_t>(Value) == Value; +} + +/// isMask_32 - This function returns true if the argument is a sequence of ones +/// starting at the least significant bit with the remainder zero (32 bit +/// version). Ex. isMask_32(0x0000FFFFU) == true. +inline bool isMask_32(uint32_t Value) { + return Value && ((Value + 1) & Value) == 0; +} + +/// isMask_64 - This function returns true if the argument is a sequence of ones +/// starting at the least significant bit with the remainder zero (64 bit +/// version). +inline bool isMask_64(uint64_t Value) { + return Value && ((Value + 1) & Value) == 0; +} + +/// isShiftedMask_32 - This function returns true if the argument contains a +/// sequence of ones with the remainder zero (32 bit version.) +/// Ex. isShiftedMask_32(0x0000FF00U) == true. +inline bool isShiftedMask_32(uint32_t Value) { + return isMask_32((Value - 1) | Value); +} + +/// isShiftedMask_64 - This function returns true if the argument contains a +/// sequence of ones with the remainder zero (64 bit version.) +inline bool isShiftedMask_64(uint64_t Value) { + return isMask_64((Value - 1) | Value); +} + +/// isPowerOf2_32 - This function returns true if the argument is a power of +/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) +inline bool isPowerOf2_32(uint32_t Value) { + return Value && !(Value & (Value - 1)); +} + +/// isPowerOf2_64 - This function returns true if the argument is a power of two +/// > 0 (64 bit edition.) +inline bool isPowerOf2_64(uint64_t Value) { + return Value && !(Value & (Value - int64_t(1L))); +} + +/// ByteSwap_16 - This function returns a byte-swapped representation of the +/// 16-bit argument, Value. +inline uint16_t ByteSwap_16(uint16_t Value) { +#if defined(_MSC_VER) && !defined(_DEBUG) + // The DLL version of the runtime lacks these functions (bug!?), but in a + // release build they're replaced with BSWAP instructions anyway. + return _byteswap_ushort(Value); +#else + uint16_t Hi = Value << 8; + uint16_t Lo = Value >> 8; + return Hi | Lo; +#endif +} + +/// ByteSwap_32 - This function returns a byte-swapped representation of the +/// 32-bit argument, Value. +inline uint32_t ByteSwap_32(uint32_t Value) { +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) + return __builtin_bswap32(Value); +#elif defined(_MSC_VER) && !defined(_DEBUG) + return _byteswap_ulong(Value); +#else + uint32_t Byte0 = Value & 0x000000FF; + uint32_t Byte1 = Value & 0x0000FF00; + uint32_t Byte2 = Value & 0x00FF0000; + uint32_t Byte3 = Value & 0xFF000000; + return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24); +#endif +} + +/// ByteSwap_64 - This function returns a byte-swapped representation of the +/// 64-bit argument, Value. 
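/// Ex. ByteSwap_64(0x1122334455667788ULL) == 0x8877665544332211ULL.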
+inline uint64_t ByteSwap_64(uint64_t Value) { +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) + return __builtin_bswap64(Value); +#elif defined(_MSC_VER) && !defined(_DEBUG) + return _byteswap_uint64(Value); +#else + uint64_t Hi = ByteSwap_32(uint32_t(Value)); + uint32_t Lo = ByteSwap_32(uint32_t(Value >> 32)); + return (Hi << 32) | Lo; +#endif +} + +/// CountLeadingZeros_32 - this function performs the platform optimal form of +/// counting the number of zeros from the most significant bit to the first one +/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8. +/// Returns 32 if the word is zero. +inline unsigned CountLeadingZeros_32(uint32_t Value) { + unsigned Count; // result +#if __GNUC__ >= 4 + // PowerPC is defined for __builtin_clz(0) +#if !defined(__ppc__) && !defined(__ppc64__) + if (!Value) return 32; +#endif + Count = __builtin_clz(Value); +#else + if (!Value) return 32; + Count = 0; + // bisecton method for count leading zeros + for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) { + uint32_t Tmp = Value >> Shift; + if (Tmp) { + Value = Tmp; + } else { + Count |= Shift; + } + } +#endif + return Count; +} + +/// CountLeadingOnes_32 - this function performs the operation of +/// counting the number of ones from the most significant bit to the first zero +/// bit. Ex. CountLeadingOnes_32(0xFF0FFF00) == 8. +/// Returns 32 if the word is all ones. +inline unsigned CountLeadingOnes_32(uint32_t Value) { + return CountLeadingZeros_32(~Value); +} + +/// CountLeadingZeros_64 - This function performs the platform optimal form +/// of counting the number of zeros from the most significant bit to the first +/// one bit (64 bit edition.) +/// Returns 64 if the word is zero. +inline unsigned CountLeadingZeros_64(uint64_t Value) { + unsigned Count; // result +#if __GNUC__ >= 4 + // PowerPC is defined for __builtin_clzll(0) +#if !defined(__ppc__) && !defined(__ppc64__) + if (!Value) return 64; +#endif + Count = __builtin_clzll(Value); +#else + if (sizeof(long) == sizeof(int64_t)) { + if (!Value) return 64; + Count = 0; + // bisecton method for count leading zeros + for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) { + uint64_t Tmp = Value >> Shift; + if (Tmp) { + Value = Tmp; + } else { + Count |= Shift; + } + } + } else { + // get hi portion + uint32_t Hi = Hi_32(Value); + + // if some bits in hi portion + if (Hi) { + // leading zeros in hi portion plus all bits in lo portion + Count = CountLeadingZeros_32(Hi); + } else { + // get lo portion + uint32_t Lo = Lo_32(Value); + // same as 32 bit value + Count = CountLeadingZeros_32(Lo)+32; + } + } +#endif + return Count; +} + +/// CountLeadingOnes_64 - This function performs the operation +/// of counting the number of ones from the most significant bit to the first +/// zero bit (64 bit edition.) +/// Returns 64 if the word is all ones. +inline unsigned CountLeadingOnes_64(uint64_t Value) { + return CountLeadingZeros_64(~Value); +} + +/// CountTrailingZeros_32 - this function performs the platform optimal form of +/// counting the number of zeros from the least significant bit to the first one +/// bit. Ex. CountTrailingZeros_32(0xFF00FF00) == 8. +/// Returns 32 if the word is zero. +inline unsigned CountTrailingZeros_32(uint32_t Value) { +#if __GNUC__ >= 4 + return Value ? 
__builtin_ctz(Value) : 32; +#else + static const unsigned Mod37BitPosition[] = { + 32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13, + 4, 7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9, + 5, 20, 8, 19, 18 + }; + return Mod37BitPosition[(-Value & Value) % 37]; +#endif +} + +/// CountTrailingOnes_32 - this function performs the operation of +/// counting the number of ones from the least significant bit to the first zero +/// bit. Ex. CountTrailingOnes_32(0x00FF00FF) == 8. +/// Returns 32 if the word is all ones. +inline unsigned CountTrailingOnes_32(uint32_t Value) { + return CountTrailingZeros_32(~Value); +} + +/// CountTrailingZeros_64 - This function performs the platform optimal form +/// of counting the number of zeros from the least significant bit to the first +/// one bit (64 bit edition.) +/// Returns 64 if the word is zero. +inline unsigned CountTrailingZeros_64(uint64_t Value) { +#if __GNUC__ >= 4 + return Value ? __builtin_ctzll(Value) : 64; +#else + static const unsigned Mod67Position[] = { + 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54, + 4, 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55, + 47, 5, 32, 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27, + 29, 50, 43, 46, 31, 37, 21, 57, 52, 8, 26, 49, 45, 36, 56, + 7, 48, 35, 6, 34, 33, 0 + }; + return Mod67Position[(-Value & Value) % 67]; +#endif +} + +/// CountTrailingOnes_64 - This function performs the operation +/// of counting the number of ones from the least significant bit to the first +/// zero bit (64 bit edition.) +/// Returns 64 if the word is all ones. +inline unsigned CountTrailingOnes_64(uint64_t Value) { + return CountTrailingZeros_64(~Value); +} + +/// CountPopulation_32 - this function counts the number of set bits in a value. +/// Ex. CountPopulation(0xF000F000) = 8 +/// Returns 0 if the word is zero. +inline unsigned CountPopulation_32(uint32_t Value) { +#if __GNUC__ >= 4 + return __builtin_popcount(Value); +#else + uint32_t v = Value - ((Value >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; +#endif +} + +/// CountPopulation_64 - this function counts the number of set bits in a value, +/// (64 bit edition.) +inline unsigned CountPopulation_64(uint64_t Value) { +#if __GNUC__ >= 4 + return __builtin_popcountll(Value); +#else + uint64_t v = Value - ((Value >> 1) & 0x5555555555555555ULL); + v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); + v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); +#endif +} + +/// Log2_32 - This function returns the floor log base 2 of the specified value, +/// -1 if the value is zero. (32 bit edition.) +/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 +inline unsigned Log2_32(uint32_t Value) { + return 31 - CountLeadingZeros_32(Value); +} + +/// Log2_64 - This function returns the floor log base 2 of the specified value, +/// -1 if the value is zero. (64 bit edition.) +inline unsigned Log2_64(uint64_t Value) { + return 63 - CountLeadingZeros_64(Value); +} + +/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified +/// value, 32 if the value is zero. (32 bit edition). +/// Ex. 
Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3 +inline unsigned Log2_32_Ceil(uint32_t Value) { + return 32-CountLeadingZeros_32(Value-1); +} + +/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified +/// value, 64 if the value is zero. (64 bit edition.) +inline unsigned Log2_64_Ceil(uint64_t Value) { + return 64-CountLeadingZeros_64(Value-1); +} + +/// GreatestCommonDivisor64 - Return the greatest common divisor of the two +/// values using Euclid's algorithm. +inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) { + while (B) { + uint64_t T = B; + B = A % B; + A = T; + } + return A; +} + +/// BitsToDouble - This function takes a 64-bit integer and returns the bit +/// equivalent double. +inline double BitsToDouble(uint64_t Bits) { + union { + uint64_t L; + double D; + } T; + T.L = Bits; + return T.D; +} + +/// BitsToFloat - This function takes a 32-bit integer and returns the bit +/// equivalent float. +inline float BitsToFloat(uint32_t Bits) { + union { + uint32_t I; + float F; + } T; + T.I = Bits; + return T.F; +} + +/// DoubleToBits - This function takes a double and returns the bit +/// equivalent 64-bit integer. Note that copying doubles around +/// changes the bits of NaNs on some hosts, notably x86, so this +/// routine cannot be used if these bits are needed. +inline uint64_t DoubleToBits(double Double) { + union { + uint64_t L; + double D; + } T; + T.D = Double; + return T.L; +} + +/// FloatToBits - This function takes a float and returns the bit +/// equivalent 32-bit integer. Note that copying floats around +/// changes the bits of NaNs on some hosts, notably x86, so this +/// routine cannot be used if these bits are needed. +inline uint32_t FloatToBits(float Float) { + union { + uint32_t I; + float F; + } T; + T.F = Float; + return T.I; +} + +/// Platform-independent wrappers for the C99 isnan() function. +int IsNAN(float f); +int IsNAN(double d); + +/// Platform-independent wrappers for the C99 isinf() function. +int IsInf(float f); +int IsInf(double d); + +/// MinAlign - A and B are either alignments or offsets. Return the minimum +/// alignment that may be assumed after adding the two together. +static inline uint64_t MinAlign(uint64_t A, uint64_t B) { + // The largest power of 2 that divides both A and B. + return (A | B) & -(A | B); +} + +/// NextPowerOf2 - Returns the next power of two (in 64-bits) +/// that is strictly greater than A. Returns zero on overflow. +static inline uint64_t NextPowerOf2(uint64_t A) { + A |= (A >> 1); + A |= (A >> 2); + A |= (A >> 4); + A |= (A >> 8); + A |= (A >> 16); + A |= (A >> 32); + return A + 1; +} + +/// RoundUpToAlignment - Returns the next integer (mod 2**64) that is +/// greater than or equal to \arg Value and is a multiple of \arg +/// Align. Align must be non-zero. +/// +/// Examples: +/// RoundUpToAlignment(5, 8) = 8 +/// RoundUpToAlignment(17, 8) = 24 +/// RoundUpToAlignment(~0LL, 8) = 0 +inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) { + return ((Value + Align - 1) / Align) * Align; +} + +/// abs64 - absolute value of a 64-bit int. Not all environments support +/// "abs" on whatever their name for the 64-bit int type is. The absolute +/// value of the largest negative number is undefined, as with "abs". +inline int64_t abs64(int64_t x) { + return (x < 0) ? 
-x : x; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h new file mode 100644 index 0000000..58a217f --- /dev/null +++ b/include/llvm/Support/MemoryBuffer.h @@ -0,0 +1,109 @@ +//===--- MemoryBuffer.h - Memory Buffer Interface ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MemoryBuffer interface. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_MEMORYBUFFER_H +#define LLVM_SUPPORT_MEMORYBUFFER_H + +#include "llvm/Support/DataTypes.h" +#include <string> + +namespace llvm { + +/// MemoryBuffer - This interface provides simple read-only access to a block +/// of memory, and provides simple methods for reading files and standard input +/// into a memory buffer. In addition to basic access to the characters in the +/// file, this interface guarantees you can read one character past the end of +/// @verbatim the file, and that this character will read as '\0'. @endverbatim +class MemoryBuffer { + const char *BufferStart; // Start of the buffer. + const char *BufferEnd; // End of the buffer. + + /// MustDeleteBuffer - True if we allocated this buffer. If so, the + /// destructor must know the delete[] it. + bool MustDeleteBuffer; +protected: + MemoryBuffer() : MustDeleteBuffer(false) {} + void init(const char *BufStart, const char *BufEnd); + void initCopyOf(const char *BufStart, const char *BufEnd); +public: + virtual ~MemoryBuffer(); + + const char *getBufferStart() const { return BufferStart; } + const char *getBufferEnd() const { return BufferEnd; } + size_t getBufferSize() const { return BufferEnd-BufferStart; } + + /// getBufferIdentifier - Return an identifier for this buffer, typically the + /// filename it was read from. + virtual const char *getBufferIdentifier() const { + return "Unknown buffer"; + } + + /// getFile - Open the specified file as a MemoryBuffer, returning a new + /// MemoryBuffer if successful, otherwise returning null. If FileSize is + /// specified, this means that the client knows that the file exists and that + /// it has the specified size. + static MemoryBuffer *getFile(const char *Filename, + std::string *ErrStr = 0, + int64_t FileSize = -1); + + /// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note + /// that EndPtr[0] must be a null byte and be accessible! + static MemoryBuffer *getMemBuffer(const char *StartPtr, const char *EndPtr, + const char *BufferName = ""); + + /// getMemBufferCopy - Open the specified memory range as a MemoryBuffer, + /// copying the contents and taking ownership of it. This has no requirements + /// on EndPtr[0]. + static MemoryBuffer *getMemBufferCopy(const char *StartPtr,const char *EndPtr, + const char *BufferName = ""); + + /// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that + /// is completely initialized to zeros. Note that the caller should + /// initialize the memory allocated by this method. The memory is owned by + /// the MemoryBuffer object. + static MemoryBuffer *getNewMemBuffer(size_t Size, + const char *BufferName = ""); + + /// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size + /// that is not initialized. 
Note that the caller should initialize the + /// memory allocated by this method. The memory is owned by the MemoryBuffer + /// object. + static MemoryBuffer *getNewUninitMemBuffer(size_t Size, + const char *BufferName = ""); + + /// getSTDIN - Read all of stdin into a file buffer, and return it. This + /// returns null if stdin is empty. + static MemoryBuffer *getSTDIN(); + + + /// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin + /// if the Filename is "-". If an error occurs, this returns null and fills + /// in *ErrStr with a reason. If stdin is empty, this API (unlike getSTDIN) + /// returns an empty buffer. + static MemoryBuffer *getFileOrSTDIN(const char *Filename, + std::string *ErrStr = 0, + int64_t FileSize = -1); + + /// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin + /// if the Filename is "-". If an error occurs, this returns null and fills + /// in *ErrStr with a reason. + static MemoryBuffer *getFileOrSTDIN(const std::string &FN, + std::string *ErrStr = 0, + int64_t FileSize = -1) { + return getFileOrSTDIN(FN.c_str(), ErrStr, FileSize); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/MutexGuard.h b/include/llvm/Support/MutexGuard.h new file mode 100644 index 0000000..9958b97 --- /dev/null +++ b/include/llvm/Support/MutexGuard.h @@ -0,0 +1,41 @@ +//===-- Support/MutexGuard.h - Acquire/Release Mutex In Scope ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a guard for a block of code that ensures a Mutex is locked +// upon construction and released upon destruction. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_MUTEXGUARD_H +#define LLVM_SUPPORT_MUTEXGUARD_H + +#include "llvm/System/Mutex.h" + +namespace llvm { + /// Instances of this class acquire a given Mutex Lock when constructed and + /// hold that lock until destruction. The intention is to instantiate one of + /// these on the stack at the top of some scope to be assured that C++ + /// destruction of the object will always release the Mutex and thus avoid + /// a host of nasty multi-threading problems in the face of exceptions, etc. + /// @brief Guard a section of code with a Mutex. + class MutexGuard { + sys::Mutex &M; + MutexGuard(const MutexGuard &); // DO NOT IMPLEMENT + void operator=(const MutexGuard &); // DO NOT IMPLEMENT + public: + MutexGuard(sys::Mutex &m) : M(m) { M.acquire(); } + ~MutexGuard() { M.release(); } + /// holds - Returns true if this locker instance holds the specified lock. + /// This is mostly used in assertions to validate that the correct mutex + /// is held. + bool holds(const sys::Mutex& lock) const { return &M == &lock; } + }; +} + +#endif // LLVM_SUPPORT_MUTEXGUARD_H diff --git a/include/llvm/Support/NoFolder.h b/include/llvm/Support/NoFolder.h new file mode 100644 index 0000000..1dce497 --- /dev/null +++ b/include/llvm/Support/NoFolder.h @@ -0,0 +1,178 @@ +//======-- llvm/Support/NoFolder.h - Constant folding helper -*- C++ -*-======// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines the NoFolder class, a helper for IRBuilder. It provides +// IRBuilder with a set of methods for creating unfolded constants. This is +// useful for learners trying to understand how LLVM IR works, and who don't +// want details to be hidden by the constant folder. For general constant +// creation and folding, use ConstantExpr and the routines in +// llvm/Analysis/ConstantFolding.h. +// +// Note: since it is not actually possible to create unfolded constants, this +// class returns values rather than constants. The values do not have names, +// even if names were provided to IRBuilder, which may be confusing. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_NOFOLDER_H +#define LLVM_SUPPORT_NOFOLDER_H + +#include "llvm/Constants.h" +#include "llvm/Instructions.h" + +namespace llvm { + +/// NoFolder - Create "constants" (actually, values) with no folding. +class NoFolder { +public: + + //===--------------------------------------------------------------------===// + // Binary Operators + //===--------------------------------------------------------------------===// + + Value *CreateAdd(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateAdd(LHS, RHS); + } + Value *CreateSub(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateSub(LHS, RHS); + } + Value *CreateMul(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateMul(LHS, RHS); + } + Value *CreateUDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateUDiv(LHS, RHS); + } + Value *CreateSDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateSDiv(LHS, RHS); + } + Value *CreateFDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateFDiv(LHS, RHS); + } + Value *CreateURem(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateURem(LHS, RHS); + } + Value *CreateSRem(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateSRem(LHS, RHS); + } + Value *CreateFRem(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateFRem(LHS, RHS); + } + Value *CreateShl(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateShl(LHS, RHS); + } + Value *CreateLShr(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateLShr(LHS, RHS); + } + Value *CreateAShr(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateAShr(LHS, RHS); + } + Value *CreateAnd(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateAnd(LHS, RHS); + } + Value *CreateOr(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateOr(LHS, RHS); + } + Value *CreateXor(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateXor(LHS, RHS); + } + + Value *CreateBinOp(Instruction::BinaryOps Opc, + Constant *LHS, Constant *RHS) const { + return BinaryOperator::Create(Opc, LHS, RHS); + } + + //===--------------------------------------------------------------------===// + // Unary Operators + //===--------------------------------------------------------------------===// + + Value *CreateNeg(Constant *C) const { + return BinaryOperator::CreateNeg(C); + } + Value *CreateNot(Constant *C) const { + return BinaryOperator::CreateNot(C); + } + + //===--------------------------------------------------------------------===// + // Memory Instructions + 
//===--------------------------------------------------------------------===// + + Constant *CreateGetElementPtr(Constant *C, Constant* const *IdxList, + unsigned NumIdx) const { + return ConstantExpr::getGetElementPtr(C, IdxList, NumIdx); + } + Value *CreateGetElementPtr(Constant *C, Value* const *IdxList, + unsigned NumIdx) const { + return GetElementPtrInst::Create(C, IdxList, IdxList+NumIdx); + } + + //===--------------------------------------------------------------------===// + // Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + Value *CreateCast(Instruction::CastOps Op, Constant *C, + const Type *DestTy) const { + return CastInst::Create(Op, C, DestTy); + } + Value *CreateIntCast(Constant *C, const Type *DestTy, + bool isSigned) const { + return CastInst::CreateIntegerCast(C, DestTy, isSigned); + } + + //===--------------------------------------------------------------------===// + // Compare Instructions + //===--------------------------------------------------------------------===// + + Value *CreateICmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const { + return new ICmpInst(P, LHS, RHS); + } + Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const { + return new FCmpInst(P, LHS, RHS); + } + Value *CreateVICmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const { + return new VICmpInst(P, LHS, RHS); + } + Value *CreateVFCmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const { + return new VFCmpInst(P, LHS, RHS); + } + + //===--------------------------------------------------------------------===// + // Other Instructions + //===--------------------------------------------------------------------===// + + Value *CreateSelect(Constant *C, Constant *True, Constant *False) const { + return SelectInst::Create(C, True, False); + } + + Value *CreateExtractElement(Constant *Vec, Constant *Idx) const { + return new ExtractElementInst(Vec, Idx); + } + + Value *CreateInsertElement(Constant *Vec, Constant *NewElt, + Constant *Idx) const { + return InsertElementInst::Create(Vec, NewElt, Idx); + } + + Value *CreateShuffleVector(Constant *V1, Constant *V2, Constant *Mask) const { + return new ShuffleVectorInst(V1, V2, Mask); + } + + Value *CreateExtractValue(Constant *Agg, const unsigned *IdxList, + unsigned NumIdx) const { + return ExtractValueInst::Create(Agg, IdxList, IdxList+NumIdx); + } + + Value *CreateInsertValue(Constant *Agg, Constant *Val, + const unsigned *IdxList, unsigned NumIdx) const { + return InsertValueInst::Create(Agg, Val, IdxList, IdxList+NumIdx); + } +}; + +} + +#endif diff --git a/include/llvm/Support/OutputBuffer.h b/include/llvm/Support/OutputBuffer.h new file mode 100644 index 0000000..b2077c5 --- /dev/null +++ b/include/llvm/Support/OutputBuffer.h @@ -0,0 +1,154 @@ +//=== OutputBuffer.h - Output Buffer ----------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Methods to output values to a data buffer. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_OUTPUTBUFFER_H +#define LLVM_SUPPORT_OUTPUTBUFFER_H + +#include <string> +#include <vector> + +namespace llvm { + + class OutputBuffer { + /// Output buffer. 
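    /// All of the out* methods below append to this vector, honoring the
    /// configured endianness and word size.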
+ std::vector<unsigned char> &Output; + + /// is64Bit/isLittleEndian - This information is inferred from the target + /// machine directly, indicating what header values and flags to set. + bool is64Bit, isLittleEndian; + public: + OutputBuffer(std::vector<unsigned char> &Out, + bool is64bit, bool le) + : Output(Out), is64Bit(is64bit), isLittleEndian(le) {} + + // align - Emit padding into the file until the current output position is + // aligned to the specified power of two boundary. + void align(unsigned Boundary) { + assert(Boundary && (Boundary & (Boundary - 1)) == 0 && + "Must align to 2^k boundary"); + size_t Size = Output.size(); + + if (Size & (Boundary - 1)) { + // Add padding to get alignment to the correct place. + size_t Pad = Boundary - (Size & (Boundary - 1)); + Output.resize(Size + Pad); + } + } + + //===------------------------------------------------------------------===// + // Out Functions - Output the specified value to the data buffer. + + void outbyte(unsigned char X) { + Output.push_back(X); + } + void outhalf(unsigned short X) { + if (isLittleEndian) { + Output.push_back(X & 255); + Output.push_back(X >> 8); + } else { + Output.push_back(X >> 8); + Output.push_back(X & 255); + } + } + void outword(unsigned X) { + if (isLittleEndian) { + Output.push_back((X >> 0) & 255); + Output.push_back((X >> 8) & 255); + Output.push_back((X >> 16) & 255); + Output.push_back((X >> 24) & 255); + } else { + Output.push_back((X >> 24) & 255); + Output.push_back((X >> 16) & 255); + Output.push_back((X >> 8) & 255); + Output.push_back((X >> 0) & 255); + } + } + void outxword(uint64_t X) { + if (isLittleEndian) { + Output.push_back(unsigned(X >> 0) & 255); + Output.push_back(unsigned(X >> 8) & 255); + Output.push_back(unsigned(X >> 16) & 255); + Output.push_back(unsigned(X >> 24) & 255); + Output.push_back(unsigned(X >> 32) & 255); + Output.push_back(unsigned(X >> 40) & 255); + Output.push_back(unsigned(X >> 48) & 255); + Output.push_back(unsigned(X >> 56) & 255); + } else { + Output.push_back(unsigned(X >> 56) & 255); + Output.push_back(unsigned(X >> 48) & 255); + Output.push_back(unsigned(X >> 40) & 255); + Output.push_back(unsigned(X >> 32) & 255); + Output.push_back(unsigned(X >> 24) & 255); + Output.push_back(unsigned(X >> 16) & 255); + Output.push_back(unsigned(X >> 8) & 255); + Output.push_back(unsigned(X >> 0) & 255); + } + } + void outaddr32(unsigned X) { + outword(X); + } + void outaddr64(uint64_t X) { + outxword(X); + } + void outaddr(uint64_t X) { + if (!is64Bit) + outword((unsigned)X); + else + outxword(X); + } + void outstring(const std::string &S, unsigned Length) { + unsigned len_to_copy = static_cast<unsigned>(S.length()) < Length + ? static_cast<unsigned>(S.length()) : Length; + unsigned len_to_fill = static_cast<unsigned>(S.length()) < Length + ? Length - static_cast<unsigned>(S.length()) : 0; + + for (unsigned i = 0; i < len_to_copy; ++i) + outbyte(S[i]); + + for (unsigned i = 0; i < len_to_fill; ++i) + outbyte(0); + } + + //===------------------------------------------------------------------===// + // Fix Functions - Replace an existing entry at an offset. + + void fixhalf(unsigned short X, unsigned Offset) { + unsigned char *P = &Output[Offset]; + P[0] = (X >> (isLittleEndian ? 0 : 8)) & 255; + P[1] = (X >> (isLittleEndian ? 8 : 0)) & 255; + } + void fixword(unsigned X, unsigned Offset) { + unsigned char *P = &Output[Offset]; + P[0] = (X >> (isLittleEndian ? 0 : 24)) & 255; + P[1] = (X >> (isLittleEndian ? 8 : 16)) & 255; + P[2] = (X >> (isLittleEndian ? 
16 : 8)) & 255; + P[3] = (X >> (isLittleEndian ? 24 : 0)) & 255; + } + void fixaddr(uint64_t X, unsigned Offset) { + if (!is64Bit) + fixword((unsigned)X, Offset); + else + assert(0 && "Emission of 64-bit data not implemented yet!"); + } + + unsigned char &operator[](unsigned Index) { + return Output[Index]; + } + const unsigned char &operator[](unsigned Index) const { + return Output[Index]; + } + }; + +} // end llvm namespace + +#endif // LLVM_SUPPORT_OUTPUTBUFFER_H diff --git a/include/llvm/Support/PassNameParser.h b/include/llvm/Support/PassNameParser.h new file mode 100644 index 0000000..e489e0a --- /dev/null +++ b/include/llvm/Support/PassNameParser.h @@ -0,0 +1,133 @@ +//===- llvm/Support/PassNameParser.h ----------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file the PassNameParser and FilteredPassNameParser<> classes, which are +// used to add command line arguments to a utility for all of the passes that +// have been registered into the system. +// +// The PassNameParser class adds ALL passes linked into the system (that are +// creatable) as command line arguments to the tool (when instantiated with the +// appropriate command line option template). The FilteredPassNameParser<> +// template is used for the same purposes as PassNameParser, except that it only +// includes passes that have a PassType that are compatible with the filter +// (which is the template argument). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_PASS_NAME_PARSER_H +#define LLVM_SUPPORT_PASS_NAME_PARSER_H + +#include "llvm/Support/CommandLine.h" +#include "llvm/Pass.h" +#include <algorithm> +#include <cstring> + +namespace llvm { + +//===----------------------------------------------------------------------===// +// PassNameParser class - Make use of the pass registration mechanism to +// automatically add a command line argument to opt for each pass. +// +class PassNameParser : public PassRegistrationListener, + public cl::parser<const PassInfo*> { + cl::Option *Opt; +public: + PassNameParser() : Opt(0) {} + + void initialize(cl::Option &O) { + Opt = &O; + cl::parser<const PassInfo*>::initialize(O); + + // Add all of the passes to the map that got initialized before 'this' did. + enumeratePasses(); + } + + // ignorablePassImpl - Can be overriden in subclasses to refine the list of + // which passes we want to include. + // + virtual bool ignorablePassImpl(const PassInfo *P) const { return false; } + + inline bool ignorablePass(const PassInfo *P) const { + // Ignore non-selectable and non-constructible passes! Ignore + // non-optimizations. 
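    // Concretely: skip passes with a missing or empty command-line argument,
    // passes with no registered constructor, and anything the subclass's
    // ignorablePassImpl() chooses to filter out.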
+ return P->getPassArgument() == 0 || *P->getPassArgument() == 0 || + P->getNormalCtor() == 0 || ignorablePassImpl(P); + } + + // Implement the PassRegistrationListener callbacks used to populate our map + // + virtual void passRegistered(const PassInfo *P) { + if (ignorablePass(P) || !Opt) return; + if (findOption(P->getPassArgument()) != getNumOptions()) { + cerr << "Two passes with the same argument (-" + << P->getPassArgument() << ") attempted to be registered!\n"; + abort(); + } + addLiteralOption(P->getPassArgument(), P, P->getPassName()); + } + virtual void passEnumerate(const PassInfo *P) { passRegistered(P); } + + // ValLessThan - Provide a sorting comparator for Values elements... + typedef std::pair<const char*, + std::pair<const PassInfo*, const char*> > ValType; + static bool ValLessThan(const ValType &VT1, const ValType &VT2) { + return std::string(VT1.first) < std::string(VT2.first); + } + + // printOptionInfo - Print out information about this option. Override the + // default implementation to sort the table before we print... + virtual void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const { + PassNameParser *PNP = const_cast<PassNameParser*>(this); + std::sort(PNP->Values.begin(), PNP->Values.end(), ValLessThan); + cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth); + } +}; + +///===----------------------------------------------------------------------===// +/// FilteredPassNameParser class - Make use of the pass registration +/// mechanism to automatically add a command line argument to opt for +/// each pass that satisfies a filter criteria. Filter should return +/// true for passes to be registered as command-line options. +/// +template<typename Filter> +class FilteredPassNameParser : public PassNameParser { +private: + Filter filter; + +public: + bool ignorablePassImpl(const PassInfo *P) const { return !filter(*P); } +}; + +///===----------------------------------------------------------------------===// +/// PassArgFilter - A filter for use with PassNameFilterParser that only +/// accepts a Pass whose Arg matches certain strings. +/// +/// Use like this: +/// +/// extern const char AllowedPassArgs[] = "-anders_aa -dse"; +/// +/// static cl::list< +/// const PassInfo*, +/// bool, +/// FilteredPassNameParser<PassArgFilter<AllowedPassArgs> > > +/// PassList(cl::desc("Passes available:")); +/// +/// Only the -anders_aa and -dse options will be available to the user. +/// +template<const char *Args> +class PassArgFilter { +public: + bool operator()(const PassInfo &P) const { + return(std::strstr(Args, P.getPassArgument())); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/PatternMatch.h b/include/llvm/Support/PatternMatch.h new file mode 100644 index 0000000..d27a7f1 --- /dev/null +++ b/include/llvm/Support/PatternMatch.h @@ -0,0 +1,531 @@ +//===-- llvm/Support/PatternMatch.h - Match on the LLVM IR ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides a simple and efficient mechanism for performing general +// tree-based pattern matches on the LLVM IR. The power of these routines is +// that it allows you to write concise patterns that are expressive and easy to +// understand. 
The other major advantage of this is that it allows you to +// trivially capture/bind elements in the pattern to variables. For example, +// you can do something like this: +// +// Value *Exp = ... +// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2) +// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)), +// m_And(m_Value(Y), m_ConstantInt(C2))))) { +// ... Pattern is matched and variables are bound ... +// } +// +// This is primarily useful to things like the instruction combiner, but can +// also be useful for static analysis tools or code generators. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_PATTERNMATCH_H +#define LLVM_SUPPORT_PATTERNMATCH_H + +#include "llvm/Constants.h" +#include "llvm/Instructions.h" + +namespace llvm { +namespace PatternMatch { + +template<typename Val, typename Pattern> +bool match(Val *V, const Pattern &P) { + return const_cast<Pattern&>(P).match(V); +} + +template<typename Class> +struct leaf_ty { + template<typename ITy> + bool match(ITy *V) { return isa<Class>(V); } +}; + +/// m_Value() - Match an arbitrary value and ignore it. +inline leaf_ty<Value> m_Value() { return leaf_ty<Value>(); } +/// m_ConstantInt() - Match an arbitrary ConstantInt and ignore it. +inline leaf_ty<ConstantInt> m_ConstantInt() { return leaf_ty<ConstantInt>(); } + +template<int64_t Val> +struct constantint_ty { + template<typename ITy> + bool match(ITy *V) { + if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) { + const APInt &CIV = CI->getValue(); + if (Val >= 0) + return CIV == Val; + // If Val is negative, and CI is shorter than it, truncate to the right + // number of bits. If it is larger, then we have to sign extend. Just + // compare their negated values. + return -CIV == -Val; + } + return false; + } +}; + +/// m_ConstantInt(int64_t) - Match a ConstantInt with a specific value +/// and ignore it. +template<int64_t Val> +inline constantint_ty<Val> m_ConstantInt() { + return constantint_ty<Val>(); +} + +struct zero_ty { + template<typename ITy> + bool match(ITy *V) { + if (const Constant *C = dyn_cast<Constant>(V)) + return C->isNullValue(); + return false; + } +}; + +/// m_Zero() - Match an arbitrary zero/null constant. +inline zero_ty m_Zero() { return zero_ty(); } + + +template<typename Class> +struct bind_ty { + Class *&VR; + bind_ty(Class *&V) : VR(V) {} + + template<typename ITy> + bool match(ITy *V) { + if (Class *CV = dyn_cast<Class>(V)) { + VR = CV; + return true; + } + return false; + } +}; + +/// m_Value - Match a value, capturing it if we match. +inline bind_ty<Value> m_Value(Value *&V) { return V; } + +/// m_ConstantInt - Match a ConstantInt, capturing the value if we match. +inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; } + +/// specificval_ty - Match a specified Value*. +struct specificval_ty { + const Value *Val; + specificval_ty(const Value *V) : Val(V) {} + + template<typename ITy> + bool match(ITy *V) { + return V == Val; + } +}; + +/// m_Specific - Match if we have a specific specified value. +inline specificval_ty m_Specific(const Value *V) { return V; } + + +//===----------------------------------------------------------------------===// +// Matchers for specific binary operators. 
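// Each m_XXX helper below wraps BinaryOp_match for a single Instruction
// opcode; BinaryOp_match also accepts an equivalent ConstantExpr, so the
// same pattern matches both instructions and constant expressions.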
+// + +template<typename LHS_t, typename RHS_t, + unsigned Opcode, typename ConcreteTy = BinaryOperator> +struct BinaryOp_match { + LHS_t L; + RHS_t R; + + BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (V->getValueID() == Value::InstructionVal + Opcode) { + ConcreteTy *I = cast<ConcreteTy>(V); + return I->getOpcode() == Opcode && L.match(I->getOperand(0)) && + R.match(I->getOperand(1)); + } + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) + return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) && + R.match(CE->getOperand(1)); + return false; + } +}; + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::And>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L, + const RHS &R) { + return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R); +} + 
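// As an illustrative sketch (the names here are placeholders, not part of
// this header), the helpers above compose with the leaf matchers:
//
//   Value *Expr = ...;
//   Value *X; ConstantInt *Amt;
//   if (match(Expr, m_Sub(m_Value(X), m_ConstantInt<1>())))
//     ... // Expr computes "X - 1" for some value X.
//   if (match(Expr, m_Shl(m_Value(X), m_ConstantInt(Amt))))
//     ... // Expr is a left shift; X and Amt are bound to its operands.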
+//===----------------------------------------------------------------------===// +// Matchers for either AShr or LShr .. for convenience +// +template<typename LHS_t, typename RHS_t, typename ConcreteTy = BinaryOperator> +struct Shr_match { + LHS_t L; + RHS_t R; + + Shr_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (V->getValueID() == Value::InstructionVal + Instruction::LShr || + V->getValueID() == Value::InstructionVal + Instruction::AShr) { + ConcreteTy *I = cast<ConcreteTy>(V); + return (I->getOpcode() == Instruction::AShr || + I->getOpcode() == Instruction::LShr) && + L.match(I->getOperand(0)) && + R.match(I->getOperand(1)); + } + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) + return (CE->getOpcode() == Instruction::LShr || + CE->getOpcode() == Instruction::AShr) && + L.match(CE->getOperand(0)) && + R.match(CE->getOperand(1)); + return false; + } +}; + +template<typename LHS, typename RHS> +inline Shr_match<LHS, RHS> m_Shr(const LHS &L, const RHS &R) { + return Shr_match<LHS, RHS>(L, R); +} + +//===----------------------------------------------------------------------===// +// Matchers for binary classes +// + +template<typename LHS_t, typename RHS_t, typename Class, typename OpcType> +struct BinaryOpClass_match { + OpcType *Opcode; + LHS_t L; + RHS_t R; + + BinaryOpClass_match(OpcType &Op, const LHS_t &LHS, + const RHS_t &RHS) + : Opcode(&Op), L(LHS), R(RHS) {} + BinaryOpClass_match(const LHS_t &LHS, const RHS_t &RHS) + : Opcode(0), L(LHS), R(RHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (Class *I = dyn_cast<Class>(V)) + if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) { + if (Opcode) + *Opcode = I->getOpcode(); + return true; + } +#if 0 // Doesn't handle constantexprs yet! 
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) + return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) && + R.match(CE->getOperand(1)); +#endif + return false; + } +}; + +template<typename LHS, typename RHS> +inline BinaryOpClass_match<LHS, RHS, BinaryOperator, Instruction::BinaryOps> +m_Shift(Instruction::BinaryOps &Op, const LHS &L, const RHS &R) { + return BinaryOpClass_match<LHS, RHS, + BinaryOperator, Instruction::BinaryOps>(Op, L, R); +} + +template<typename LHS, typename RHS> +inline BinaryOpClass_match<LHS, RHS, BinaryOperator, Instruction::BinaryOps> +m_Shift(const LHS &L, const RHS &R) { + return BinaryOpClass_match<LHS, RHS, + BinaryOperator, Instruction::BinaryOps>(L, R); +} + +//===----------------------------------------------------------------------===// +// Matchers for CmpInst classes +// + +template<typename LHS_t, typename RHS_t, typename Class, typename PredicateTy> +struct CmpClass_match { + PredicateTy &Predicate; + LHS_t L; + RHS_t R; + + CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, + const RHS_t &RHS) + : Predicate(Pred), L(LHS), R(RHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (Class *I = dyn_cast<Class>(V)) + if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) { + Predicate = I->getPredicate(); + return true; + } + return false; + } +}; + +template<typename LHS, typename RHS> +inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate> +m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) { + return CmpClass_match<LHS, RHS, + ICmpInst, ICmpInst::Predicate>(Pred, L, R); +} + +template<typename LHS, typename RHS> +inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate> +m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) { + return CmpClass_match<LHS, RHS, + FCmpInst, FCmpInst::Predicate>(Pred, L, R); +} + +//===----------------------------------------------------------------------===// +// Matchers for SelectInst classes +// + +template<typename Cond_t, typename LHS_t, typename RHS_t> +struct SelectClass_match { + Cond_t C; + LHS_t L; + RHS_t R; + + SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, + const RHS_t &RHS) + : C(Cond), L(LHS), R(RHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (SelectInst *I = dyn_cast<SelectInst>(V)) + return C.match(I->getOperand(0)) && + L.match(I->getOperand(1)) && + R.match(I->getOperand(2)); + return false; + } +}; + +template<typename Cond, typename LHS, typename RHS> +inline SelectClass_match<Cond, RHS, LHS> +m_Select(const Cond &C, const LHS &L, const RHS &R) { + return SelectClass_match<Cond, LHS, RHS>(C, L, R); +} + +/// m_SelectCst - This matches a select of two constants, e.g.: +/// m_SelectCst(m_Value(V), -1, 0) +template<int64_t L, int64_t R, typename Cond> +inline SelectClass_match<Cond, constantint_ty<L>, constantint_ty<R> > +m_SelectCst(const Cond &C) { + return SelectClass_match<Cond, constantint_ty<L>, + constantint_ty<R> >(C, m_ConstantInt<L>(), + m_ConstantInt<R>()); +} + + +//===----------------------------------------------------------------------===// +// Matchers for CastInst classes +// + +template<typename Op_t, typename Class> +struct CastClass_match { + Op_t Op; + + CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (Class *I = dyn_cast<Class>(V)) + return Op.match(I->getOperand(0)); + return false; + } +}; + +template<typename Class, typename OpTy> +inline CastClass_match<OpTy, Class> m_Cast(const OpTy &Op) { + return CastClass_match<OpTy, Class>(Op); 
+} + + +//===----------------------------------------------------------------------===// +// Matchers for unary operators +// + +template<typename LHS_t> +struct not_match { + LHS_t L; + + not_match(const LHS_t &LHS) : L(LHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (Instruction *I = dyn_cast<Instruction>(V)) + if (I->getOpcode() == Instruction::Xor) + return matchIfNot(I->getOperand(0), I->getOperand(1)); + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) + if (CE->getOpcode() == Instruction::Xor) + return matchIfNot(CE->getOperand(0), CE->getOperand(1)); + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) + return L.match(ConstantExpr::getNot(CI)); + return false; + } +private: + bool matchIfNot(Value *LHS, Value *RHS) { + if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) + return CI->isAllOnesValue() && L.match(LHS); + if (ConstantInt *CI = dyn_cast<ConstantInt>(LHS)) + return CI->isAllOnesValue() && L.match(RHS); + if (ConstantVector *CV = dyn_cast<ConstantVector>(RHS)) + return CV->isAllOnesValue() && L.match(LHS); + if (ConstantVector *CV = dyn_cast<ConstantVector>(LHS)) + return CV->isAllOnesValue() && L.match(RHS); + return false; + } +}; + +template<typename LHS> +inline not_match<LHS> m_Not(const LHS &L) { return L; } + + +template<typename LHS_t> +struct neg_match { + LHS_t L; + + neg_match(const LHS_t &LHS) : L(LHS) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (Instruction *I = dyn_cast<Instruction>(V)) + if (I->getOpcode() == Instruction::Sub) + return matchIfNeg(I->getOperand(0), I->getOperand(1)); + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) + if (CE->getOpcode() == Instruction::Sub) + return matchIfNeg(CE->getOperand(0), CE->getOperand(1)); + if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) + return L.match(ConstantExpr::getNeg(CI)); + return false; + } +private: + bool matchIfNeg(Value *LHS, Value *RHS) { + return LHS == ConstantExpr::getZeroValueForNegationExpr(LHS->getType()) && + L.match(RHS); + } +}; + +template<typename LHS> +inline neg_match<LHS> m_Neg(const LHS &L) { return L; } + + +//===----------------------------------------------------------------------===// +// Matchers for control flow +// + +template<typename Cond_t> +struct brc_match { + Cond_t Cond; + BasicBlock *&T, *&F; + brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f) + : Cond(C), T(t), F(f) { + } + + template<typename OpTy> + bool match(OpTy *V) { + if (BranchInst *BI = dyn_cast<BranchInst>(V)) + if (BI->isConditional()) { + if (Cond.match(BI->getCondition())) { + T = BI->getSuccessor(0); + F = BI->getSuccessor(1); + return true; + } + } + return false; + } +}; + +template<typename Cond_t> +inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) { + return brc_match<Cond_t>(C, T, F); +} + +} // end namespace PatternMatch +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/PluginLoader.h b/include/llvm/Support/PluginLoader.h new file mode 100644 index 0000000..bdbb134 --- /dev/null +++ b/include/llvm/Support/PluginLoader.h @@ -0,0 +1,37 @@ +//===-- llvm/Support/PluginLoader.h - Plugin Loader for Tools ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// A tool can #include this file to get a -load option that allows the user to +// load arbitrary shared objects into the tool's address space. 
Note that this +// header can only be included by a program ONCE, so it should never to used by +// library authors. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_PLUGINLOADER_H +#define LLVM_SUPPORT_PLUGINLOADER_H + +#include "llvm/Support/CommandLine.h" + +namespace llvm { + struct PluginLoader { + void operator=(const std::string &Filename); + static unsigned getNumPlugins(); + static std::string& getPlugin(unsigned num); + }; + +#ifndef DONT_GET_PLUGIN_LOADER_OPTION + // This causes operator= above to be invoked for every -load option. + static cl::opt<PluginLoader, false, cl::parser<std::string> > + LoadOpt("load", cl::ZeroOrMore, cl::value_desc("pluginfilename"), + cl::desc("Load the specified plugin")); +#endif +} + +#endif diff --git a/include/llvm/Support/PointerLikeTypeTraits.h b/include/llvm/Support/PointerLikeTypeTraits.h new file mode 100644 index 0000000..b0edd3b --- /dev/null +++ b/include/llvm/Support/PointerLikeTypeTraits.h @@ -0,0 +1,77 @@ +//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PointerLikeTypeTraits class. This allows data +// structures to reason about pointers and other things that are pointer sized. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H +#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +/// PointerLikeTypeTraits - This is a traits object that is used to handle +/// pointer types and things that are just wrappers for pointers as a uniform +/// entity. +template <typename T> +class PointerLikeTypeTraits { + // getAsVoidPointer + // getFromVoidPointer + // getNumLowBitsAvailable +}; + +// Provide PointerLikeTypeTraits for non-cvr pointers. +template<typename T> +class PointerLikeTypeTraits<T*> { +public: + static inline void *getAsVoidPointer(T* P) { return P; } + static inline T *getFromVoidPointer(void *P) { + return static_cast<T*>(P); + } + + /// Note, we assume here that malloc returns objects at least 8-byte aligned. + /// However, this may be wrong, or pointers may be from something other than + /// malloc. In this case, you should specialize this template to reduce this. + /// + /// All clients should use assertions to do a run-time check to ensure that + /// this is actually true. + enum { NumLowBitsAvailable = 2 }; +}; + +// Provide PointerLikeTypeTraits for const pointers. +template<typename T> +class PointerLikeTypeTraits<const T*> { +public: + static inline const void *getAsVoidPointer(const T* P) { return P; } + static inline const T *getFromVoidPointer(const void *P) { + return static_cast<const T*>(P); + } + enum { NumLowBitsAvailable = 2 }; +}; + +// Provide PointerLikeTypeTraits for uintptr_t. +template<> +class PointerLikeTypeTraits<uintptr_t> { +public: + static inline void *getAsVoidPointer(uintptr_t P) { + return reinterpret_cast<void*>(P); + } + static inline uintptr_t getFromVoidPointer(void *P) { + return reinterpret_cast<uintptr_t>(P); + } + // No bits are available! 
+ enum { NumLowBitsAvailable = 0 }; +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/PredIteratorCache.h b/include/llvm/Support/PredIteratorCache.h new file mode 100644 index 0000000..bb66a8e --- /dev/null +++ b/include/llvm/Support/PredIteratorCache.h @@ -0,0 +1,70 @@ +//===- llvm/Support/PredIteratorCache.h - pred_iterator Cache ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PredIteratorCache class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/Allocator.h" +#include "llvm/Support/CFG.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" + +#ifndef LLVM_SUPPORT_PREDITERATORCACHE_H +#define LLVM_SUPPORT_PREDITERATORCACHE_H + +namespace llvm { + + /// PredIteratorCache - This class is an extremely trivial cache for + /// predecessor iterator queries. This is useful for code that repeatedly + /// wants the predecessor list for the same blocks. + class PredIteratorCache { + /// BlockToPredsMap - Pointer to null-terminated list. + DenseMap<BasicBlock*, BasicBlock**> BlockToPredsMap; + DenseMap<BasicBlock*, unsigned> BlockToPredCountMap; + + /// Memory - This is the space that holds cached preds. + BumpPtrAllocator Memory; + public: + + /// GetPreds - Get a cached list for the null-terminated predecessor list of + /// the specified block. This can be used in a loop like this: + /// for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) + /// use(*PI); + /// instead of: + /// for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) + BasicBlock **GetPreds(BasicBlock *BB) { + BasicBlock **&Entry = BlockToPredsMap[BB]; + if (Entry) return Entry; + + SmallVector<BasicBlock*, 32> PredCache(pred_begin(BB), pred_end(BB)); + PredCache.push_back(0); // null terminator. + + BlockToPredCountMap[BB] = PredCache.size()-1; + + Entry = Memory.Allocate<BasicBlock*>(PredCache.size()); + std::copy(PredCache.begin(), PredCache.end(), Entry); + return Entry; + } + + unsigned GetNumPreds(BasicBlock *BB) { + GetPreds(BB); + return BlockToPredCountMap[BB]; + } + + /// clear - Remove all information. + void clear() { + BlockToPredsMap.clear(); + BlockToPredCountMap.clear(); + Memory.Reset(); + } + }; +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/PrettyStackTrace.h b/include/llvm/Support/PrettyStackTrace.h new file mode 100644 index 0000000..909d286 --- /dev/null +++ b/include/llvm/Support/PrettyStackTrace.h @@ -0,0 +1,65 @@ +//===- llvm/Support/PrettyStackTrace.h - Pretty Crash Handling --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the PrettyStackTraceEntry class, which is used to make +// crashes give more contextual information about what the program was doing +// when it crashed. 
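+//
+// A typical use (an illustrative sketch, not part of this header) is to
+// declare entries on the program stack around interesting work; the helper
+// names below are placeholders:
+//
+//   int main(int argc, char **argv) {
+//     PrettyStackTraceProgram X(argc, argv);
+//     ...
+//     PrettyStackTraceString Y("processing the input file");
+//     DoSomethingThatMightCrash();   // hypothetical helper
+//   }
+//
+// If the program crashes while Y is in scope, both frames are printed.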
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H +#define LLVM_SUPPORT_PRETTYSTACKTRACE_H + +namespace llvm { + class raw_ostream; + + /// PrettyStackTraceEntry - This class is used to represent a frame of the + /// "pretty" stack trace that is dumped when a program crashes. You can define + /// subclasses of this and declare them on the program stack: when they are + /// constructed and destructed, they will add their symbolic frames to a + /// virtual stack trace. This gets dumped out if the program crashes. + class PrettyStackTraceEntry { + const PrettyStackTraceEntry *NextEntry; + PrettyStackTraceEntry(const PrettyStackTraceEntry &); // DO NOT IMPLEMENT + void operator=(const PrettyStackTraceEntry&); // DO NOT IMPLEMENT + public: + PrettyStackTraceEntry(); + virtual ~PrettyStackTraceEntry(); + + /// print - Emit information about this stack frame to OS. + virtual void print(raw_ostream &OS) const = 0; + + /// getNextEntry - Return the next entry in the list of frames. + const PrettyStackTraceEntry *getNextEntry() const { return NextEntry; } + }; + + /// PrettyStackTraceString - This object prints a specified string (which + /// should not contain newlines) to the stream as the stack trace when a crash + /// occurs. + class PrettyStackTraceString : public PrettyStackTraceEntry { + const char *Str; + public: + PrettyStackTraceString(const char *str) : Str(str) {} + virtual void print(raw_ostream &OS) const; + }; + + /// PrettyStackTraceProgram - This object prints a specified program arguments + /// to the stream as the stack trace when a crash occurs. + class PrettyStackTraceProgram : public PrettyStackTraceEntry { + int ArgC; + const char *const *ArgV; + public: + PrettyStackTraceProgram(int argc, const char * const*argv) + : ArgC(argc), ArgV(argv) {} + virtual void print(raw_ostream &OS) const; + }; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Support/Recycler.h b/include/llvm/Support/Recycler.h new file mode 100644 index 0000000..2fa0365 --- /dev/null +++ b/include/llvm/Support/Recycler.h @@ -0,0 +1,116 @@ +//==- llvm/Support/Recycler.h - Recycling Allocator --------------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the Recycler class template. See the doxygen comment for +// Recycler for more details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_RECYCLER_H +#define LLVM_SUPPORT_RECYCLER_H + +#include "llvm/ADT/ilist.h" +#include "llvm/Support/AlignOf.h" +#include <cassert> + +namespace llvm { + +/// PrintRecyclingAllocatorStats - Helper for RecyclingAllocator for +/// printing statistics. +/// +void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize); + +/// RecyclerStruct - Implementation detail for Recycler. This is a +/// class that the recycler imposes on free'd memory to carve out +/// next/prev pointers. 
+struct RecyclerStruct { + RecyclerStruct *Prev, *Next; +}; + +template<> +struct ilist_traits<RecyclerStruct> : ilist_default_traits<RecyclerStruct> { + static RecyclerStruct *getPrev(const RecyclerStruct *t) { return t->Prev; } + static RecyclerStruct *getNext(const RecyclerStruct *t) { return t->Next; } + static void setPrev(RecyclerStruct *t, RecyclerStruct *p) { t->Prev = p; } + static void setNext(RecyclerStruct *t, RecyclerStruct *n) { t->Next = n; } + + mutable RecyclerStruct Sentinel; + RecyclerStruct *createSentinel() const { + return &Sentinel; + } + static void destroySentinel(RecyclerStruct *) {} + + RecyclerStruct *provideInitialHead() const { return createSentinel(); } + RecyclerStruct *ensureHead(RecyclerStruct*) const { return createSentinel(); } + static void noteHead(RecyclerStruct*, RecyclerStruct*) {} + + static void deleteNode(RecyclerStruct *) { + assert(0 && "Recycler's ilist_traits shouldn't see a deleteNode call!"); + } +}; + +/// Recycler - This class manages a linked-list of deallocated nodes +/// and facilitates reusing deallocated memory in place of allocating +/// new memory. +/// +template<class T, size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment> +class Recycler { + /// FreeList - Doubly-linked list of nodes that have deleted contents and + /// are not in active use. + /// + iplist<RecyclerStruct> FreeList; + +public: + ~Recycler() { + // If this fails, either the callee has lost track of some allocation, + // or the callee isn't tracking allocations and should just call + // clear() before deleting the Recycler. + assert(FreeList.empty() && "Non-empty recycler deleted!"); + } + + /// clear - Release all the tracked allocations to the allocator. The + /// recycler must be free of any tracked allocations before being + /// deleted; calling clear is one way to ensure this. + template<class AllocatorType> + void clear(AllocatorType &Allocator) { + while (!FreeList.empty()) { + T *t = reinterpret_cast<T *>(FreeList.remove(FreeList.begin())); + Allocator.Deallocate(t); + } + } + + template<class SubClass, class AllocatorType> + SubClass *Allocate(AllocatorType &Allocator) { + assert(sizeof(SubClass) <= Size && + "Recycler allocation size is less than object size!"); + assert(AlignOf<SubClass>::Alignment <= Align && + "Recycler allocation alignment is less than object alignment!"); + return !FreeList.empty() ? + reinterpret_cast<SubClass *>(FreeList.remove(FreeList.begin())) : + static_cast<SubClass *>(Allocator.Allocate(Size, Align)); + } + + template<class AllocatorType> + T *Allocate(AllocatorType &Allocator) { + return Allocate<T>(Allocator); + } + + template<class SubClass, class AllocatorType> + void Deallocate(AllocatorType & /*Allocator*/, SubClass* Element) { + FreeList.push_front(reinterpret_cast<RecyclerStruct *>(Element)); + } + + void PrintStats() { + PrintRecyclerStats(Size, Align, FreeList.size()); + } +}; + +} + +#endif diff --git a/include/llvm/Support/RecyclingAllocator.h b/include/llvm/Support/RecyclingAllocator.h new file mode 100644 index 0000000..8e957f1 --- /dev/null +++ b/include/llvm/Support/RecyclingAllocator.h @@ -0,0 +1,59 @@ +//==- llvm/Support/RecyclingAllocator.h - Recycling Allocator ----*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the RecyclingAllocator class. 
See the doxygen comment for +// RecyclingAllocator for more details on the implementation. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_RECYCLINGALLOCATOR_H +#define LLVM_SUPPORT_RECYCLINGALLOCATOR_H + +#include "llvm/Support/Recycler.h" + +namespace llvm { + +/// RecyclingAllocator - This class wraps an Allocator, adding the +/// functionality of recycling deleted objects. +/// +template<class AllocatorType, class T, + size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment> +class RecyclingAllocator { +private: + /// Base - Implementation details. + /// + Recycler<T, Size, Align> Base; + + /// Allocator - The wrapped allocator. + /// + AllocatorType Allocator; + +public: + ~RecyclingAllocator() { Base.clear(Allocator); } + + /// Allocate - Return a pointer to storage for an object of type + /// SubClass. The storage may be either newly allocated or recycled. + /// + template<class SubClass> + SubClass *Allocate() { return Base.Allocate<SubClass>(Allocator); } + + T *Allocate() { return Base.Allocate(Allocator); } + + /// Deallocate - Release storage for the pointed-to object. The + /// storage will be kept track of and may be recycled. + /// + template<class SubClass> + void Deallocate(SubClass* E) { return Base.Deallocate(Allocator, E); } + + void PrintStats() { Base.PrintStats(); } +}; + +} + +#endif diff --git a/include/llvm/Support/Registry.h b/include/llvm/Support/Registry.h new file mode 100644 index 0000000..454679b --- /dev/null +++ b/include/llvm/Support/Registry.h @@ -0,0 +1,224 @@ +//=== Registry.h - Linker-supported plugin registries -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Defines a registry template for discovering pluggable modules. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_REGISTRY_H +#define LLVM_SUPPORT_REGISTRY_H + +namespace llvm { + /// A simple registry entry which provides only a name, description, and + /// no-argument constructor. + template <typename T> + class SimpleRegistryEntry { + const char *Name, *Desc; + T *(*Ctor)(); + + public: + SimpleRegistryEntry(const char *N, const char *D, T *(*C)()) + : Name(N), Desc(D), Ctor(C) + {} + + const char *getName() const { return Name; } + const char *getDesc() const { return Desc; } + T *instantiate() const { return Ctor(); } + }; + + + /// Traits for registry entries. If using other than SimpleRegistryEntry, it + /// is necessary to define an alternate traits class. + template <typename T> + class RegistryTraits { + RegistryTraits(); // Do not implement. + + public: + typedef SimpleRegistryEntry<T> entry; + + /// nameof/descof - Accessors for name and description of entries. These are + // used to generate help for command-line options. + static const char *nameof(const entry &Entry) { return Entry.getName(); } + static const char *descof(const entry &Entry) { return Entry.getDesc(); } + }; + + + /// A global registry used in conjunction with static constructors to make + /// pluggable components (like targets or garbage collectors) "just work" when + /// linked with an executable. 
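+  ///
+  /// For illustration (Collector is the example type used throughout this
+  /// header), every registered entry can be enumerated with the static
+  /// iterators:
+  ///
+  ///   for (Registry<Collector>::iterator I = Registry<Collector>::begin(),
+  ///        E = Registry<Collector>::end(); I != E; ++I)
+  ///     cerr << I->getName() << ": " << I->getDesc() << "\n";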
+ template <typename T, typename U = RegistryTraits<T> > + class Registry { + public: + typedef U traits; + typedef typename U::entry entry; + + class node; + class listener; + class iterator; + + private: + Registry(); // Do not implement. + + static void Announce(const entry &E) { + for (listener *Cur = ListenerHead; Cur; Cur = Cur->Next) + Cur->registered(E); + } + + friend class node; + static node *Head, *Tail; + + friend class listener; + static listener *ListenerHead, *ListenerTail; + + public: + class iterator; + + + /// Node in linked list of entries. + /// + class node { + friend class iterator; + + node *Next; + const entry& Val; + + public: + node(const entry& V) : Next(0), Val(V) { + if (Tail) + Tail->Next = this; + else + Head = this; + Tail = this; + + Announce(V); + } + }; + + + /// Iterators for registry entries. + /// + class iterator { + const node *Cur; + + public: + explicit iterator(const node *N) : Cur(N) {} + + bool operator==(const iterator &That) const { return Cur == That.Cur; } + bool operator!=(const iterator &That) const { return Cur != That.Cur; } + iterator &operator++() { Cur = Cur->Next; return *this; } + const entry &operator*() const { return Cur->Val; } + const entry *operator->() const { return &Cur->Val; } + }; + + static iterator begin() { return iterator(Head); } + static iterator end() { return iterator(0); } + + + /// Abstract base class for registry listeners, which are informed when new + /// entries are added to the registry. Simply subclass and instantiate: + /// + /// class CollectorPrinter : public Registry<Collector>::listener { + /// protected: + /// void registered(const Registry<Collector>::entry &e) { + /// cerr << "collector now available: " << e->getName() << "\n"; + /// } + /// + /// public: + /// CollectorPrinter() { init(); } // Print those already registered. + /// }; + /// + /// CollectorPrinter Printer; + /// + class listener { + listener *Prev, *Next; + + friend void Registry::Announce(const entry &E); + + protected: + /// Called when an entry is added to the registry. + /// + virtual void registered(const entry &) = 0; + + /// Calls 'registered' for each pre-existing entry. + /// + void init() { + for (iterator I = begin(), E = end(); I != E; ++I) + registered(*I); + } + + public: + listener() : Prev(ListenerTail), Next(0) { + if (Prev) + Prev->Next = this; + else + ListenerHead = this; + ListenerTail = this; + } + + virtual ~listener() { + if (Next) + Next->Prev = Prev; + else + ListenerTail = Prev; + if (Prev) + Prev->Next = Next; + else + ListenerHead = Next; + } + }; + + + /// A static registration template. Use like such: + /// + /// Registry<Collector>::Add<FancyGC> + /// X("fancy-gc", "Newfangled garbage collector."); + /// + /// Use of this template requires that: + /// + /// 1. The registered subclass has a default constructor. + // + /// 2. The registry entry type has a constructor compatible with this + /// signature: + /// + /// entry(const char *Name, const char *ShortDesc, T *(*Ctor)()); + /// + /// If you have more elaborate requirements, then copy and modify. + /// + template <typename V> + class Add { + entry Entry; + node Node; + + static T *CtorFn() { return new V(); } + + public: + Add(const char *Name, const char *Desc) + : Entry(Name, Desc, CtorFn), Node(Entry) {} + }; + + /// Registry::Parser now lives in llvm/Support/RegistryParser.h. 
+ + }; + + + template <typename T, typename U> + typename Registry<T,U>::node *Registry<T,U>::Head; + + template <typename T, typename U> + typename Registry<T,U>::node *Registry<T,U>::Tail; + + template <typename T, typename U> + typename Registry<T,U>::listener *Registry<T,U>::ListenerHead; + + template <typename T, typename U> + typename Registry<T,U>::listener *Registry<T,U>::ListenerTail; + +} + +#endif diff --git a/include/llvm/Support/RegistryParser.h b/include/llvm/Support/RegistryParser.h new file mode 100644 index 0000000..2cc5783 --- /dev/null +++ b/include/llvm/Support/RegistryParser.h @@ -0,0 +1,55 @@ +//=== RegistryParser.h - Linker-supported plugin registries -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Defines a command-line parser for a registry. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_REGISTRY_PARSER_H +#define LLVM_SUPPORT_REGISTRY_PARSER_H + +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Registry.h" + +namespace llvm { + + /// A command-line parser for a registry. Use like such: + /// + /// static cl::opt<Registry<Collector>::entry, false, + /// RegistryParser<Collector> > + /// GCOpt("gc", cl::desc("Garbage collector to use."), + /// cl::value_desc()); + /// + /// To make use of the value: + /// + /// Collector *TheCollector = GCOpt->instantiate(); + /// + template <typename T, typename U = RegistryTraits<T> > + class RegistryParser : + public cl::parser<const typename U::entry*>, + public Registry<T, U>::listener { + typedef U traits; + typedef typename U::entry entry; + typedef typename Registry<T, U>::listener listener; + + protected: + void registered(const entry &E) { + addLiteralOption(traits::nameof(E), &E, traits::descof(E)); + } + + public: + void initialize(cl::Option &O) { + listener::init(); + cl::parser<const typename U::entry*>::initialize(O); + } + }; + +} + +#endif // LLVM_SUPPORT_REGISTRY_PARSER_H diff --git a/include/llvm/Support/SlowOperationInformer.h b/include/llvm/Support/SlowOperationInformer.h new file mode 100644 index 0000000..b30aa98 --- /dev/null +++ b/include/llvm/Support/SlowOperationInformer.h @@ -0,0 +1,65 @@ +//===- llvm/Support/SlowOperationInformer.h - Keep user informed *- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a simple object which can be used to let the user know what +// is going on when a slow operation is happening, and gives them the ability to +// cancel it. Potentially slow operations can stack allocate one of these +// objects, and periodically call the "progress" method to update the progress +// bar. If the operation takes more than 1 second to complete, the progress bar +// is automatically shown and updated. As such, the slow operation should not +// print stuff to the screen, and should not be confused if an extra line +// appears on the screen (ie, the cursor should be at the start of the line). +// +// If the user presses CTRL-C during the operation, the next invocation of the +// progress method return true indicating that the operation was cancelled. 
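+//
+// A typical use looks like this (an illustrative sketch; NumItems and
+// ProcessItem are placeholders):
+//
+//   SlowOperationInformer Informer("linking");
+//   for (unsigned i = 0; i != NumItems; ++i) {
+//     if (Informer.progress(i, NumItems))
+//       return;                    // the user cancelled the operation
+//     ProcessItem(i);
+//   }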
+// +// Because SlowOperationInformers fiddle around with signals, they cannot be +// nested, and interact poorly with threads. The SIGALRM handler is set back to +// SIGDFL, but the SIGINT signal handler is restored when the +// SlowOperationInformer is destroyed. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_SLOW_OPERATION_INFORMER_H +#define LLVM_SUPPORT_SLOW_OPERATION_INFORMER_H + +#include <string> +#include <cassert> +#include "llvm/Support/DataTypes.h" + +namespace llvm { + class SlowOperationInformer { + std::string OperationName; + unsigned LastPrintAmount; + + SlowOperationInformer(const SlowOperationInformer&); // DO NOT IMPLEMENT + void operator=(const SlowOperationInformer&); // DO NOT IMPLEMENT + public: + SlowOperationInformer(const std::string &Name); + ~SlowOperationInformer(); + + /// progress - Clients should periodically call this method when they can + /// handle cancellation. The Amount variable should indicate how far + /// along the operation is, given in 1/10ths of a percent (in other words, + /// Amount should range from 0 to 1000). If the user cancels the operation, + /// this returns true, false otherwise. + bool progress(unsigned Amount); + + /// progress - Same as the method above, but this performs the division for + /// you, and helps you avoid overflow if you are dealing with largish + /// numbers. + bool progress(unsigned Current, unsigned Maximum) { + assert(Maximum != 0 && + "Shouldn't be doing work if there is nothing to do!"); + return progress(Current*uint64_t(1000UL)/Maximum); + } + }; +} // end namespace llvm + +#endif /* SLOW_OPERATION_INFORMER_H */ diff --git a/include/llvm/Support/StableBasicBlockNumbering.h b/include/llvm/Support/StableBasicBlockNumbering.h new file mode 100644 index 0000000..5e0f87e --- /dev/null +++ b/include/llvm/Support/StableBasicBlockNumbering.h @@ -0,0 +1,59 @@ +//===- StableBasicBlockNumbering.h - Provide BB identifiers -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class provides a *stable* numbering of basic blocks that does not depend +// on their address in memory (which is nondeterministic). When requested, this +// class simply provides a unique ID for each basic block in the function +// specified and the inverse mapping. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_STABLEBASICBLOCKNUMBERING_H +#define LLVM_SUPPORT_STABLEBASICBLOCKNUMBERING_H + +#include "llvm/Function.h" +#include "llvm/ADT/UniqueVector.h" + +namespace llvm { + class StableBasicBlockNumbering { + // BBNumbering - Holds the numbering. + UniqueVector<BasicBlock*> BBNumbering; + public: + StableBasicBlockNumbering(Function *F = 0) { + if (F) compute(*F); + } + + /// compute - If we have not computed a numbering for the function yet, do + /// so. + void compute(Function &F) { + if (BBNumbering.empty()) { + for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) + BBNumbering.insert(I); + } + } + + /// getNumber - Return the ID number for the specified BasicBlock. 
+ /// + unsigned getNumber(BasicBlock *BB) const { + unsigned Idx = BBNumbering.idFor(BB); + assert(Idx && "Invalid basic block or numbering not computed!"); + return Idx-1; + } + + /// getBlock - Return the BasicBlock corresponding to a particular ID. + /// + BasicBlock *getBlock(unsigned N) const { + assert(N < BBNumbering.size() && + "Block ID out of range or numbering not computed!"); + return BBNumbering[N+1]; + } + }; +} + +#endif diff --git a/include/llvm/Support/Streams.h b/include/llvm/Support/Streams.h new file mode 100644 index 0000000..445ab98 --- /dev/null +++ b/include/llvm/Support/Streams.h @@ -0,0 +1,91 @@ +//===- llvm/Support/Streams.h - Wrappers for iostreams ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a wrapper for the STL I/O streams. It prevents the need +// to include <iostream> in a file just to get I/O. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_STREAMS_H +#define LLVM_SUPPORT_STREAMS_H + +#include <iosfwd> + +namespace llvm { + + /// FlushStream - Function called by BaseStream to flush an ostream. + void FlushStream(std::ostream &S); + + /// BaseStream - Acts like the STL streams. It's a wrapper for the std::cerr, + /// std::cout, std::cin, etc. streams. However, it doesn't require #including + /// @verbatim <iostream> @endverbatm in every file (doing so increases static + /// c'tors & d'tors in the object code). + /// + template <typename StreamTy> + class BaseStream { + StreamTy *Stream; + public: + BaseStream() : Stream(0) {} + BaseStream(StreamTy &S) : Stream(&S) {} + BaseStream(StreamTy *S) : Stream(S) {} + + StreamTy *stream() const { return Stream; } + + inline BaseStream &operator << (std::ios_base &(*Func)(std::ios_base&)) { + if (Stream) *Stream << Func; + return *this; + } + + inline BaseStream &operator << (StreamTy &(*Func)(StreamTy&)) { + if (Stream) *Stream << Func; + return *this; + } + + void flush() { + if (Stream) + FlushStream(*Stream); + } + + template <typename Ty> + BaseStream &operator << (const Ty &Thing) { + if (Stream) *Stream << Thing; + return *this; + } + + template <typename Ty> + BaseStream &operator >> (Ty &Thing) { + if (Stream) *Stream >> Thing; + return *this; + } + + template <typename Ty> + BaseStream &write(const Ty &A, unsigned N) { + if (Stream) Stream->write(A, N); + return *this; + } + + operator StreamTy* () { return Stream; } + + bool operator == (const StreamTy &S) { return &S == Stream; } + bool operator != (const StreamTy &S) { return !(*this == S); } + bool operator == (const BaseStream &S) { return S.Stream == Stream; } + bool operator != (const BaseStream &S) { return !(*this == S); } + }; + + typedef BaseStream<std::ostream> OStream; + typedef BaseStream<std::istream> IStream; + typedef BaseStream<std::stringstream> StringStream; + + extern OStream cout; + extern OStream cerr; + extern IStream cin; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/StringPool.h b/include/llvm/Support/StringPool.h new file mode 100644 index 0000000..98db8e2 --- /dev/null +++ b/include/llvm/Support/StringPool.h @@ -0,0 +1,148 @@ +//===-- StringPool.h - Interned string pool -------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the 
University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares an interned string pool, which helps reduce the cost of +// strings by using the same storage for identical strings. +// +// To intern a string: +// +// StringPool Pool; +// PooledStringPtr Str = Pool.intern("wakka wakka"); +// +// To use the value of an interned string, use operator bool and operator*: +// +// if (Str) +// cerr << "the string is" << *Str << "\n"; +// +// Pooled strings are immutable, but you can change a PooledStringPtr to point +// to another instance. So that interned strings can eventually be freed, +// strings in the string pool are reference-counted (automatically). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_STRINGPOOL_H +#define LLVM_SUPPORT_STRINGPOOL_H + +#include "llvm/ADT/StringMap.h" +#include <new> +#include <cassert> + +namespace llvm { + + class PooledStringPtr; + + /// StringPool - An interned string pool. Use the intern method to add a + /// string. Strings are removed automatically as PooledStringPtrs are + /// destroyed. + class StringPool { + /// PooledString - This is the value of an entry in the pool's interning + /// table. + struct PooledString { + StringPool *Pool; ///< So the string can remove itself. + unsigned Refcount; ///< Number of referencing PooledStringPtrs. + + public: + PooledString() : Pool(0), Refcount(0) { } + }; + + friend class PooledStringPtr; + + typedef StringMap<PooledString> table_t; + typedef StringMapEntry<PooledString> entry_t; + table_t InternTable; + + public: + StringPool(); + ~StringPool(); + + /// intern - Adds a string to the pool and returns a reference-counted + /// pointer to it. No additional memory is allocated if the string already + /// exists in the pool. + PooledStringPtr intern(const char *Begin, const char *End); + + /// intern - Adds a null-terminated string to the pool and returns a + /// reference-counted pointer to it. No additional memory is allocated if + /// the string already exists in the pool. + inline PooledStringPtr intern(const char *Str); + + /// empty - Checks whether the pool is empty. Returns true if so. + /// + inline bool empty() const { return InternTable.empty(); } + }; + + /// PooledStringPtr - A pointer to an interned string. Use operator bool to + /// test whether the pointer is valid, and operator * to get the string if so. + /// This is a lightweight value class with storage requirements equivalent to + /// a single pointer, but it does have reference-counting overhead when + /// copied. 
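+  ///
+  /// For illustration, copying and clearing behave like this (a sketch, not
+  /// part of the interface):
+  ///
+  ///   StringPool Pool;
+  ///   PooledStringPtr A = Pool.intern("foo");
+  ///   PooledStringPtr B = A;       // shares the entry, bumps the refcount
+  ///   assert(A == B && !Pool.empty());
+  ///   A.clear();                   // B still references the entry
+  ///   B.clear();                   // last reference gone, entry is freed
+  ///   assert(Pool.empty());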
+ class PooledStringPtr { + typedef StringPool::entry_t entry_t; + entry_t *S; + + public: + PooledStringPtr() : S(0) {} + + explicit PooledStringPtr(entry_t *E) : S(E) { + if (S) ++S->getValue().Refcount; + } + + PooledStringPtr(const PooledStringPtr &That) : S(That.S) { + if (S) ++S->getValue().Refcount; + } + + PooledStringPtr &operator=(const PooledStringPtr &That) { + if (S != That.S) { + clear(); + S = That.S; + if (S) ++S->getValue().Refcount; + } + return *this; + } + + void clear() { + if (!S) + return; + if (--S->getValue().Refcount == 0) { + S->getValue().Pool->InternTable.remove(S); + S->Destroy(); + } + S = 0; + } + + ~PooledStringPtr() { clear(); } + + inline const char *begin() const { + assert(*this && "Attempt to dereference empty PooledStringPtr!"); + return S->getKeyData(); + } + + inline const char *end() const { + assert(*this && "Attempt to dereference empty PooledStringPtr!"); + return S->getKeyData() + S->getKeyLength(); + } + + inline unsigned size() const { + assert(*this && "Attempt to dereference empty PooledStringPtr!"); + return S->getKeyLength(); + } + + inline const char *operator*() const { return begin(); } + inline operator bool() const { return S != 0; } + + inline bool operator==(const PooledStringPtr &That) { return S == That.S; } + inline bool operator!=(const PooledStringPtr &That) { return S != That.S; } + }; + + PooledStringPtr StringPool::intern(const char *Str) { + return intern(Str, Str + strlen(Str)); + } + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/SystemUtils.h b/include/llvm/Support/SystemUtils.h new file mode 100644 index 0000000..9a33fa3 --- /dev/null +++ b/include/llvm/Support/SystemUtils.h @@ -0,0 +1,42 @@ +//===- SystemUtils.h - Utilities to do low-level system stuff ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains functions used to do a variety of low-level, often +// system-specific, tasks. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_SYSTEMUTILS_H +#define LLVM_SUPPORT_SYSTEMUTILS_H + +#include "llvm/System/Program.h" + +namespace llvm { + +/// Determine if the ostream provided is connected to the std::cout and +/// displayed or not (to a console window). If so, generate a warning message +/// advising against display of bitcode and return true. Otherwise just return +/// false +/// @brief Check for output written to a console +bool CheckBitcodeOutputToConsole( + std::ostream* stream_to_check, ///< The stream to be checked + bool print_warning = true ///< Control whether warnings are printed +); + +/// FindExecutable - Find a named executable, giving the argv[0] of program +/// being executed. This allows us to find another LLVM tool if it is built into +/// the same directory, but that directory is neither the current directory, nor +/// in the PATH. If the executable cannot be found, return an empty string. +/// @brief Find a named executable. 
+sys::Path FindExecutable(const std::string &ExeName, + const std::string &ProgramPath); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/TargetFolder.h b/include/llvm/Support/TargetFolder.h new file mode 100644 index 0000000..14f2c9b --- /dev/null +++ b/include/llvm/Support/TargetFolder.h @@ -0,0 +1,209 @@ +//====-- llvm/Support/TargetFolder.h - Constant folding helper -*- C++ -*-====// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetFolder class, a helper for IRBuilder. +// It provides IRBuilder with a set of methods for creating constants with +// target dependent folding. For general constant creation and folding, +// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_TARGETFOLDER_H +#define LLVM_SUPPORT_TARGETFOLDER_H + +#include "llvm/Constants.h" +#include "llvm/Analysis/ConstantFolding.h" + +namespace llvm { + +class TargetData; + +/// TargetFolder - Create constants with target dependent folding. +class TargetFolder { + const TargetData &TD; + + /// Fold - Fold the constant using target specific information. + Constant *Fold(Constant *C) const { + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) + if (Constant *CF = ConstantFoldConstantExpression(CE, &TD)) + return CF; + return C; + } + +public: + TargetFolder(const TargetData &TheTD) : TD(TheTD) {} + + //===--------------------------------------------------------------------===// + // Binary Operators + //===--------------------------------------------------------------------===// + + Constant *CreateAdd(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getAdd(LHS, RHS)); + } + Constant *CreateSub(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getSub(LHS, RHS)); + } + Constant *CreateMul(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getMul(LHS, RHS)); + } + Constant *CreateUDiv(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getUDiv(LHS, RHS)); + } + Constant *CreateSDiv(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getSDiv(LHS, RHS)); + } + Constant *CreateFDiv(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getFDiv(LHS, RHS)); + } + Constant *CreateURem(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getURem(LHS, RHS)); + } + Constant *CreateSRem(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getSRem(LHS, RHS)); + } + Constant *CreateFRem(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getFRem(LHS, RHS)); + } + Constant *CreateShl(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getShl(LHS, RHS)); + } + Constant *CreateLShr(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getLShr(LHS, RHS)); + } + Constant *CreateAShr(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getAShr(LHS, RHS)); + } + Constant *CreateAnd(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getAnd(LHS, RHS)); + } + Constant *CreateOr(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getOr(LHS, RHS)); + } + Constant *CreateXor(Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::getXor(LHS, RHS)); + } + + Constant 
*CreateBinOp(Instruction::BinaryOps Opc, + Constant *LHS, Constant *RHS) const { + return Fold(ConstantExpr::get(Opc, LHS, RHS)); + } + + //===--------------------------------------------------------------------===// + // Unary Operators + //===--------------------------------------------------------------------===// + + Constant *CreateNeg(Constant *C) const { + return Fold(ConstantExpr::getNeg(C)); + } + Constant *CreateNot(Constant *C) const { + return Fold(ConstantExpr::getNot(C)); + } + + //===--------------------------------------------------------------------===// + // Memory Instructions + //===--------------------------------------------------------------------===// + + Constant *CreateGetElementPtr(Constant *C, Constant* const *IdxList, + unsigned NumIdx) const { + return Fold(ConstantExpr::getGetElementPtr(C, IdxList, NumIdx)); + } + Constant *CreateGetElementPtr(Constant *C, Value* const *IdxList, + unsigned NumIdx) const { + return Fold(ConstantExpr::getGetElementPtr(C, IdxList, NumIdx)); + } + + //===--------------------------------------------------------------------===// + // Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + Constant *CreateCast(Instruction::CastOps Op, Constant *C, + const Type *DestTy) const { + if (C->getType() == DestTy) + return C; // avoid calling Fold + return Fold(ConstantExpr::getCast(Op, C, DestTy)); + } + Constant *CreateIntCast(Constant *C, const Type *DestTy, + bool isSigned) const { + if (C->getType() == DestTy) + return C; // avoid calling Fold + return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned)); + } + + Constant *CreateBitCast(Constant *C, const Type *DestTy) const { + return CreateCast(Instruction::BitCast, C, DestTy); + } + Constant *CreateIntToPtr(Constant *C, const Type *DestTy) const { + return CreateCast(Instruction::IntToPtr, C, DestTy); + } + Constant *CreatePtrToInt(Constant *C, const Type *DestTy) const { + return CreateCast(Instruction::PtrToInt, C, DestTy); + } + Constant *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const { + if (C->getType() == DestTy) + return C; // avoid calling Fold + return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy)); + } + + //===--------------------------------------------------------------------===// + // Compare Instructions + //===--------------------------------------------------------------------===// + + Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return Fold(ConstantExpr::getCompare(P, LHS, RHS)); + } + Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return Fold(ConstantExpr::getCompare(P, LHS, RHS)); + } + Constant *CreateVICmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return Fold(ConstantExpr::getCompare(P, LHS, RHS)); + } + Constant *CreateVFCmp(CmpInst::Predicate P, Constant *LHS, + Constant *RHS) const { + return Fold(ConstantExpr::getCompare(P, LHS, RHS)); + } + + //===--------------------------------------------------------------------===// + // Other Instructions + //===--------------------------------------------------------------------===// + + Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const { + return Fold(ConstantExpr::getSelect(C, True, False)); + } + + Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const { + return Fold(ConstantExpr::getExtractElement(Vec, Idx)); + } + + Constant *CreateInsertElement(Constant *Vec, Constant *NewElt, + Constant *Idx) const { + 
return Fold(ConstantExpr::getInsertElement(Vec, NewElt, Idx)); + } + + Constant *CreateShuffleVector(Constant *V1, Constant *V2, + Constant *Mask) const { + return Fold(ConstantExpr::getShuffleVector(V1, V2, Mask)); + } + + Constant *CreateExtractValue(Constant *Agg, const unsigned *IdxList, + unsigned NumIdx) const { + return Fold(ConstantExpr::getExtractValue(Agg, IdxList, NumIdx)); + } + + Constant *CreateInsertValue(Constant *Agg, Constant *Val, + const unsigned *IdxList, unsigned NumIdx) const { + return Fold(ConstantExpr::getInsertValue(Agg, Val, IdxList, NumIdx)); + } +}; + +} + +#endif diff --git a/include/llvm/Support/Timer.h b/include/llvm/Support/Timer.h new file mode 100644 index 0000000..584199f --- /dev/null +++ b/include/llvm/Support/Timer.h @@ -0,0 +1,172 @@ +//===-- llvm/Support/Timer.h - Interval Timing Support ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines three classes: Timer, TimeRegion, and TimerGroup, +// documented below. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_TIMER_H +#define LLVM_SUPPORT_TIMER_H + +#include "llvm/Support/DataTypes.h" +#include <string> +#include <vector> +#include <iosfwd> +#include <cassert> + +namespace llvm { + +class TimerGroup; + +/// Timer - This class is used to track the amount of time spent between +/// invocations of it's startTimer()/stopTimer() methods. Given appropriate OS +/// support it can also keep track of the RSS of the program at various points. +/// By default, the Timer will print the amount of time it has captured to +/// standard error when the laster timer is destroyed, otherwise it is printed +/// when its TimerGroup is destroyed. Timers do not print their information +/// if they are never started. +/// +class Timer { + double Elapsed; // Wall clock time elapsed in seconds + double UserTime; // User time elapsed + double SystemTime; // System time elapsed + ssize_t MemUsed; // Memory allocated (in bytes) + size_t PeakMem; // Peak memory used + size_t PeakMemBase; // Temporary for peak calculation... + std::string Name; // The name of this time variable + bool Started; // Has this time variable ever been started? + TimerGroup *TG; // The TimerGroup this Timer is in. +public: + explicit Timer(const std::string &N); + Timer(const std::string &N, TimerGroup &tg); + Timer(const Timer &T); + ~Timer(); + + double getProcessTime() const { return UserTime+SystemTime; } + double getWallTime() const { return Elapsed; } + ssize_t getMemUsed() const { return MemUsed; } + size_t getPeakMem() const { return PeakMem; } + std::string getName() const { return Name; } + + const Timer &operator=(const Timer &T) { + Elapsed = T.Elapsed; + UserTime = T.UserTime; + SystemTime = T.SystemTime; + MemUsed = T.MemUsed; + PeakMem = T.PeakMem; + PeakMemBase = T.PeakMemBase; + Name = T.Name; + Started = T.Started; + assert(TG == T.TG && "Can only assign timers in the same TimerGroup!"); + return *this; + } + + // operator< - Allow sorting... + bool operator<(const Timer &T) const { + // Sort by Wall Time elapsed, as it is the only thing really accurate + return Elapsed < T.Elapsed; + } + bool operator>(const Timer &T) const { return T.operator<(*this); } + + /// startTimer - Start the timer running. 
Time between calls to + /// startTimer/stopTimer is counted by the Timer class. Note that these calls + /// must be correctly paired. + /// + void startTimer(); + + /// stopTimer - Stop the timer. + /// + void stopTimer(); + + /// addPeakMemoryMeasurement - This method should be called whenever memory + /// usage needs to be checked. It adds a peak memory measurement to the + /// currently active timers, which will be printed when the timer group prints + /// + static void addPeakMemoryMeasurement(); + + /// print - Print the current timer to standard error, and reset the "Started" + /// flag. + void print(const Timer &Total, std::ostream &OS); + +private: + friend class TimerGroup; + + // Copy ctor, initialize with no TG member. + Timer(bool, const Timer &T); + + /// sum - Add the time accumulated in the specified timer into this timer. + /// + void sum(const Timer &T); +}; + + +/// The TimeRegion class is used as a helper class to call the startTimer() and +/// stopTimer() methods of the Timer class. When the object is constructed, it +/// starts the timer specified as it's argument. When it is destroyed, it stops +/// the relevant timer. This makes it easy to time a region of code. +/// +class TimeRegion { + Timer *T; + TimeRegion(const TimeRegion &); // DO NOT IMPLEMENT +public: + explicit TimeRegion(Timer &t) : T(&t) { + T->startTimer(); + } + explicit TimeRegion(Timer *t) : T(t) { + if (T) + T->startTimer(); + } + ~TimeRegion() { + if (T) + T->stopTimer(); + } +}; + + +/// NamedRegionTimer - This class is basically a combination of TimeRegion and +/// Timer. It allows you to declare a new timer, AND specify the region to +/// time, all in one statement. All timers with the same name are merged. This +/// is primarily used for debugging and for hunting performance problems. +/// +struct NamedRegionTimer : public TimeRegion { + explicit NamedRegionTimer(const std::string &Name); + explicit NamedRegionTimer(const std::string &Name, + const std::string &GroupName); +}; + + +/// The TimerGroup class is used to group together related timers into a single +/// report that is printed when the TimerGroup is destroyed. It is illegal to +/// destroy a TimerGroup object before all of the Timers in it are gone. A +/// TimerGroup can be specified for a newly created timer in its constructor. +/// +class TimerGroup { + std::string Name; + unsigned NumTimers; + std::vector<Timer> TimersToPrint; +public: + explicit TimerGroup(const std::string &name) : Name(name), NumTimers(0) {} + ~TimerGroup() { + assert(NumTimers == 0 && + "TimerGroup destroyed before all contained timers!"); + } + +private: + friend class Timer; + void addTimer() { ++NumTimers; } + void removeTimer(); + void addTimerToPrint(const Timer &T) { + TimersToPrint.push_back(Timer(true, T)); + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/TypeBuilder.h b/include/llvm/Support/TypeBuilder.h new file mode 100644 index 0000000..5198c81 --- /dev/null +++ b/include/llvm/Support/TypeBuilder.h @@ -0,0 +1,464 @@ +//===---- llvm/Support/TypeBuilder.h - Builder for LLVM types ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TypeBuilder class, which is used as a convenient way to +// create LLVM types with a consistent and simplified interface. 
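+//
+// For example (illustrative only):
+//
+//   TypeBuilder<types::i<8>*(types::i<32>), true>::get()  // yields "i8* (i32)"
+//   TypeBuilder<void(), false>::get()                     // yields "void ()"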
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_TYPEBUILDER_H +#define LLVM_SUPPORT_TYPEBUILDER_H + +#include "llvm/DerivedTypes.h" +#include <limits.h> + +namespace llvm { + +/// TypeBuilder - This provides a uniform API for looking up types +/// known at compile time. To support cross-compilation, we define a +/// series of tag types in the llvm::types namespace, like i<N>, +/// ieee_float, ppc_fp128, etc. TypeBuilder<T, false> allows T to be +/// any of these, a native C type (whose size may depend on the host +/// compiler), or a pointer, function, or struct type built out of +/// these. TypeBuilder<T, true> removes native C types from this set +/// to guarantee that its result is suitable for cross-compilation. +/// We define the primitive types, pointer types, and functions up to +/// 5 arguments here, but to use this class with your own types, +/// you'll need to specialize it. For example, say you want to call a +/// function defined externally as: +/// +/// struct MyType { +/// int32 a; +/// int32 *b; +/// void *array[1]; // Intended as a flexible array. +/// }; +/// int8 AFunction(struct MyType *value); +/// +/// You'll want to use +/// Function::Create(TypeBuilder<types::i<8>(MyType*), true>::get(), ...) +/// to declare the function, but when you first try this, your compiler will +/// complain that TypeBuilder<MyType, true>::get() doesn't exist. To fix this, +/// write: +/// +/// namespace llvm { +/// template<bool xcompile> class TypeBuilder<MyType, xcompile> { +/// public: +/// static const StructType *get() { +/// // Using the static result variable ensures that the type is +/// // only looked up once. +/// static const StructType *const result = StructType::get( +/// TypeBuilder<types::i<32>, xcompile>::get(), +/// TypeBuilder<types::i<32>*, xcompile>::get(), +/// TypeBuilder<types::i<8>*[], xcompile>::get(), +/// NULL); +/// return result; +/// } +/// +/// // You may find this a convenient place to put some constants +/// // to help with getelementptr. They don't have any effect on +/// // the operation of TypeBuilder. +/// enum Fields { +/// FIELD_A, +/// FIELD_B, +/// FIELD_ARRAY +/// }; +/// } +/// } // namespace llvm +/// +/// Using the static result variable ensures that the type is only looked up +/// once. +/// +/// TypeBuilder cannot handle recursive types or types you only know at runtime. +/// If you try to give it a recursive type, it will deadlock, infinitely +/// recurse, or throw a recursive_init exception. +template<typename T, bool cross_compilable> class TypeBuilder {}; + +// Types for use with cross-compilable TypeBuilders. These correspond +// exactly with an LLVM-native type. +namespace types { +/// i<N> corresponds to the LLVM IntegerType with N bits. +template<uint32_t num_bits> class i {}; + +// The following classes represent the LLVM floating types. +class ieee_float {}; +class ieee_double {}; +class x86_fp80 {}; +class fp128 {}; +class ppc_fp128 {}; +} // namespace types + +// LLVM doesn't have const or volatile types. 
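+// Instead, cv-qualifiers are simply stripped: for illustration,
+// TypeBuilder<const volatile int, false>::get() returns the same IntegerType
+// as TypeBuilder<int, false>::get().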
+template<typename T, bool cross> class TypeBuilder<const T, cross> + : public TypeBuilder<T, cross> {}; +template<typename T, bool cross> class TypeBuilder<volatile T, cross> + : public TypeBuilder<T, cross> {}; +template<typename T, bool cross> class TypeBuilder<const volatile T, cross> + : public TypeBuilder<T, cross> {}; + +// Pointers +template<typename T, bool cross> class TypeBuilder<T*, cross> { +public: + static const PointerType *get() { + static const PointerType *const result = + PointerType::getUnqual(TypeBuilder<T,cross>::get()); + return result; + } +}; + +/// There is no support for references +template<typename T, bool cross> class TypeBuilder<T&, cross> {}; + +// Arrays +template<typename T, size_t N, bool cross> class TypeBuilder<T[N], cross> { +public: + static const ArrayType *get() { + static const ArrayType *const result = + ArrayType::get(TypeBuilder<T, cross>::get(), N); + return result; + } +}; +/// LLVM uses an array of length 0 to represent an unknown-length array. +template<typename T, bool cross> class TypeBuilder<T[], cross> { +public: + static const ArrayType *get() { + static const ArrayType *const result = + ArrayType::get(TypeBuilder<T, cross>::get(), 0); + return result; + } +}; + +// Define the C integral types only for TypeBuilder<T, false>. +// +// C integral types do not have a defined size. It would be nice to use the +// stdint.h-defined typedefs that do have defined sizes, but we'd run into the +// following problem: +// +// On an ILP32 machine, stdint.h might define: +// +// typedef int int32_t; +// typedef long long int64_t; +// typedef long size_t; +// +// If we defined TypeBuilder<int32_t> and TypeBuilder<int64_t>, then any use of +// TypeBuilder<size_t> would fail. We couldn't define TypeBuilder<size_t> in +// addition to the defined-size types because we'd get duplicate definitions on +// platforms where stdint.h instead defines: +// +// typedef int int32_t; +// typedef long long int64_t; +// typedef int size_t; +// +// So we define all the primitive C types and nothing else. +#define DEFINE_INTEGRAL_TYPEBUILDER(T) \ +template<> class TypeBuilder<T, false> { \ +public: \ + static const IntegerType *get() { \ + static const IntegerType *const result = \ + IntegerType::get(sizeof(T) * CHAR_BIT); \ + return result; \ + } \ +}; \ +template<> class TypeBuilder<T, true> { \ + /* We provide a definition here so users don't accidentally */ \ + /* define these types to work. 
*/ \ +} +DEFINE_INTEGRAL_TYPEBUILDER(char); +DEFINE_INTEGRAL_TYPEBUILDER(signed char); +DEFINE_INTEGRAL_TYPEBUILDER(unsigned char); +DEFINE_INTEGRAL_TYPEBUILDER(short); +DEFINE_INTEGRAL_TYPEBUILDER(unsigned short); +DEFINE_INTEGRAL_TYPEBUILDER(int); +DEFINE_INTEGRAL_TYPEBUILDER(unsigned int); +DEFINE_INTEGRAL_TYPEBUILDER(long); +DEFINE_INTEGRAL_TYPEBUILDER(unsigned long); +#ifdef _MSC_VER +DEFINE_INTEGRAL_TYPEBUILDER(__int64); +DEFINE_INTEGRAL_TYPEBUILDER(unsigned __int64); +#else /* _MSC_VER */ +DEFINE_INTEGRAL_TYPEBUILDER(long long); +DEFINE_INTEGRAL_TYPEBUILDER(unsigned long long); +#endif /* _MSC_VER */ +#undef DEFINE_INTEGRAL_TYPEBUILDER + +template<uint32_t num_bits, bool cross> +class TypeBuilder<types::i<num_bits>, cross> { +public: + static const IntegerType *get() { + static const IntegerType *const result = IntegerType::get(num_bits); + return result; + } +}; + +template<> class TypeBuilder<float, false> { +public: + static const Type *get() { + return Type::FloatTy; + } +}; +template<> class TypeBuilder<float, true> {}; + +template<> class TypeBuilder<double, false> { +public: + static const Type *get() { + return Type::DoubleTy; + } +}; +template<> class TypeBuilder<double, true> {}; + +template<bool cross> class TypeBuilder<types::ieee_float, cross> { +public: + static const Type *get() { return Type::FloatTy; } +}; +template<bool cross> class TypeBuilder<types::ieee_double, cross> { +public: + static const Type *get() { return Type::DoubleTy; } +}; +template<bool cross> class TypeBuilder<types::x86_fp80, cross> { +public: + static const Type *get() { return Type::X86_FP80Ty; } +}; +template<bool cross> class TypeBuilder<types::fp128, cross> { +public: + static const Type *get() { return Type::FP128Ty; } +}; +template<bool cross> class TypeBuilder<types::ppc_fp128, cross> { +public: + static const Type *get() { return Type::PPC_FP128Ty; } +}; + +template<bool cross> class TypeBuilder<void, cross> { +public: + static const Type *get() { + return Type::VoidTy; + } +}; + +/// void* is disallowed in LLVM types, but it occurs often enough in C code that +/// we special case it. 
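+/// For illustration, TypeBuilder<void*, false>::get() therefore yields the
+/// same type as TypeBuilder<types::i<8>*, false>::get(), i.e. "i8*".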
+template<> class TypeBuilder<void*, false> + : public TypeBuilder<types::i<8>*, false> {}; + +template<typename R, bool cross> class TypeBuilder<R(), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + return FunctionType::get(TypeBuilder<R, cross>::get(), params, false); + } +}; +template<typename R, typename A1, bool cross> class TypeBuilder<R(A1), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(1); + params.push_back(TypeBuilder<A1, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, false); + } +}; +template<typename R, typename A1, typename A2, bool cross> +class TypeBuilder<R(A1, A2), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(2); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, false); + } +}; +template<typename R, typename A1, typename A2, typename A3, bool cross> +class TypeBuilder<R(A1, A2, A3), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(3); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + params.push_back(TypeBuilder<A3, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, false); + } +}; + +template<typename R, typename A1, typename A2, typename A3, typename A4, + bool cross> +class TypeBuilder<R(A1, A2, A3, A4), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(4); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + params.push_back(TypeBuilder<A3, cross>::get()); + params.push_back(TypeBuilder<A4, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, false); + } +}; + +template<typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, bool cross> +class TypeBuilder<R(A1, A2, A3, A4, A5), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(5); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + params.push_back(TypeBuilder<A3, cross>::get()); + params.push_back(TypeBuilder<A4, cross>::get()); + params.push_back(TypeBuilder<A5, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, false); + } +}; + +template<typename R, bool cross> class TypeBuilder<R(...), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: 
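+  // Illustrative note (not in the original header): this and the following
+  // specializations match variadic signatures.  Assuming an 8-bit 'char' and
+  // a 32-bit 'int', TypeBuilder<int(const char *, ...), false>::get() would
+  // produce the LLVM function type 'i32 (i8*, ...)'.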
+ static const FunctionType *create() { + std::vector<const Type*> params; + return FunctionType::get(TypeBuilder<R, cross>::get(), params, true); + } +}; +template<typename R, typename A1, bool cross> +class TypeBuilder<R(A1, ...), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(1); + params.push_back(TypeBuilder<A1, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, true); + } +}; +template<typename R, typename A1, typename A2, bool cross> +class TypeBuilder<R(A1, A2, ...), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(2); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, true); + } +}; +template<typename R, typename A1, typename A2, typename A3, bool cross> +class TypeBuilder<R(A1, A2, A3, ...), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(3); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + params.push_back(TypeBuilder<A3, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, true); + } +}; + +template<typename R, typename A1, typename A2, typename A3, typename A4, + bool cross> +class TypeBuilder<R(A1, A2, A3, A4, ...), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(4); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + params.push_back(TypeBuilder<A3, cross>::get()); + params.push_back(TypeBuilder<A4, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, true); + } +}; + +template<typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, bool cross> +class TypeBuilder<R(A1, A2, A3, A4, A5, ...), cross> { +public: + static const FunctionType *get() { + static const FunctionType *const result = create(); + return result; + } + +private: + static const FunctionType *create() { + std::vector<const Type*> params; + params.reserve(5); + params.push_back(TypeBuilder<A1, cross>::get()); + params.push_back(TypeBuilder<A2, cross>::get()); + params.push_back(TypeBuilder<A3, cross>::get()); + params.push_back(TypeBuilder<A4, cross>::get()); + params.push_back(TypeBuilder<A5, cross>::get()); + return FunctionType::get(TypeBuilder<R, cross>::get(), params, true); + } +}; + +} // namespace llvm + +#endif diff --git a/include/llvm/Support/ValueHandle.h b/include/llvm/Support/ValueHandle.h new file mode 100644 index 0000000..a97a5e8 --- /dev/null +++ b/include/llvm/Support/ValueHandle.h @@ -0,0 +1,272 @@ +//===- llvm/Support/ValueHandle.h - Value Smart Pointer classes -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the ValueHandle class and its sub-classes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_VALUEHANDLE_H +#define LLVM_SUPPORT_VALUEHANDLE_H + +#include "llvm/ADT/PointerIntPair.h" +#include "llvm/Value.h" + +namespace llvm { +class ValueHandleBase; + +// ValueHandleBase** is only 4-byte aligned. +template<> +class PointerLikeTypeTraits<ValueHandleBase**> { +public: + static inline void *getAsVoidPointer(ValueHandleBase** P) { return P; } + static inline ValueHandleBase **getFromVoidPointer(void *P) { + return static_cast<ValueHandleBase**>(P); + } + enum { NumLowBitsAvailable = 2 }; +}; + +/// ValueHandleBase - This is the common base class of value handles. +/// ValueHandle's are smart pointers to Value's that have special behavior when +/// the value is deleted or ReplaceAllUsesWith'd. See the specific handles +/// below for details. +/// +class ValueHandleBase { + friend class Value; +protected: + /// HandleBaseKind - This indicates what sub class the handle actually is. + /// This is to avoid having a vtable for the light-weight handle pointers. The + /// fully general Callback version does have a vtable. + enum HandleBaseKind { + Assert, + Weak, + Callback + }; +private: + + PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair; + ValueHandleBase *Next; + Value *VP; +public: + explicit ValueHandleBase(HandleBaseKind Kind) + : PrevPair(0, Kind), Next(0), VP(0) {} + ValueHandleBase(HandleBaseKind Kind, Value *V) + : PrevPair(0, Kind), Next(0), VP(V) { + if (V) + AddToUseList(); + } + ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS) + : PrevPair(0, Kind), Next(0), VP(RHS.VP) { + if (VP) + AddToExistingUseList(RHS.getPrevPtr()); + } + ~ValueHandleBase() { + if (VP) + RemoveFromUseList(); + } + + Value *operator=(Value *RHS) { + if (VP == RHS) return RHS; + if (VP) RemoveFromUseList(); + VP = RHS; + if (VP) AddToUseList(); + return RHS; + } + + Value *operator=(const ValueHandleBase &RHS) { + if (VP == RHS.VP) return RHS.VP; + if (VP) RemoveFromUseList(); + VP = RHS.VP; + if (VP) AddToExistingUseList(RHS.getPrevPtr()); + return VP; + } + + Value *operator->() const { return getValPtr(); } + Value &operator*() const { return *getValPtr(); } + +protected: + Value *getValPtr() const { return VP; } +private: + // Callbacks made from Value. + static void ValueIsDeleted(Value *V); + static void ValueIsRAUWd(Value *Old, Value *New); + + // Internal implementation details. + ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); } + HandleBaseKind getKind() const { return PrevPair.getInt(); } + void setPrevPtr(ValueHandleBase **Ptr) { PrevPair.setPointer(Ptr); } + + /// AddToUseList - Add this ValueHandle to the use list for VP, where List is + /// known to point into the existing use list. + void AddToExistingUseList(ValueHandleBase **List); + + /// AddToUseList - Add this ValueHandle to the use list for VP. + void AddToUseList(); + /// RemoveFromUseList - Remove this ValueHandle from its current use list. + void RemoveFromUseList(); +}; + +/// WeakVH - This is a value handle that tries hard to point to a Value, even +/// across RAUW operations, but will null itself out if the value is destroyed. 
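+/// For example (an illustrative sketch, not part of the original header;
+/// SomeValue and Other are hypothetical Value pointers):
+///
+///   WeakVH H(SomeValue);                   // H points at SomeValue
+///   SomeValue->replaceAllUsesWith(Other);  // H now points at Other
+///   // ...and if the value H points at is later deleted, H becomes null.
+///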
+/// this is useful for advisory sorts of information, but should not be used as +/// the key of a map (since the map would have to rearrange itself when the +/// pointer changes). +class WeakVH : public ValueHandleBase { +public: + WeakVH() : ValueHandleBase(Weak) {} + WeakVH(Value *P) : ValueHandleBase(Weak, P) {} + WeakVH(const WeakVH &RHS) + : ValueHandleBase(Weak, RHS) {} + + operator Value*() const { + return getValPtr(); + } +}; + +// Specialize simplify_type to allow WeakVH to participate in +// dyn_cast, isa, etc. +template<typename From> struct simplify_type; +template<> struct simplify_type<const WeakVH> { + typedef Value* SimpleType; + static SimpleType getSimplifiedValue(const WeakVH &WVH) { + return static_cast<Value *>(WVH); + } +}; +template<> struct simplify_type<WeakVH> : public simplify_type<const WeakVH> {}; + +/// AssertingVH - This is a Value Handle that points to a value and asserts out +/// if the value is destroyed while the handle is still live. This is very +/// useful for catching dangling pointer bugs and other things which can be +/// non-obvious. One particularly useful place to use this is as the Key of a +/// map. Dangling pointer bugs often lead to really subtle bugs that only occur +/// if another object happens to get allocated to the same address as the old +/// one. Using an AssertingVH ensures that an assert is triggered as soon as +/// the bad delete occurs. +/// +/// Note that an AssertingVH handle does *not* follow values across RAUW +/// operations. This means that RAUW's need to explicitly update the +/// AssertingVH's as it moves. This is required because in non-assert mode this +/// class turns into a trivial wrapper around a pointer. +template <typename ValueTy> +class AssertingVH +#ifndef NDEBUG + : public ValueHandleBase +#endif + { + +#ifndef NDEBUG + ValueTy *getValPtr() const { + return static_cast<ValueTy*>(ValueHandleBase::getValPtr()); + } + void setValPtr(ValueTy *P) { + ValueHandleBase::operator=(P); + } +#else + ValueTy *ThePtr; + ValueTy *getValPtr() const { return ThePtr; } + void setValPtr(ValueTy *P) { ThePtr = P; } +#endif + +public: +#ifndef NDEBUG + AssertingVH() : ValueHandleBase(Assert) {} + AssertingVH(ValueTy *P) : ValueHandleBase(Assert, P) {} + AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {} +#else + AssertingVH() : ThePtr(0) {} + AssertingVH(ValueTy *P) : ThePtr(P) {} +#endif + + operator ValueTy*() const { + return getValPtr(); + } + + ValueTy *operator=(ValueTy *RHS) { + setValPtr(RHS); + return getValPtr(); + } + ValueTy *operator=(AssertingVH<ValueTy> &RHS) { + setValPtr(RHS.getValPtr()); + return getValPtr(); + } + + ValueTy *operator->() const { return getValPtr(); } + ValueTy &operator*() const { return *getValPtr(); } +}; + +// Specialize simplify_type to allow AssertingVH to participate in +// dyn_cast, isa, etc. +template<typename From> struct simplify_type; +template<> struct simplify_type<const AssertingVH<Value> > { + typedef Value* SimpleType; + static SimpleType getSimplifiedValue(const AssertingVH<Value> &AVH) { + return static_cast<Value *>(AVH); + } +}; +template<> struct simplify_type<AssertingVH<Value> > + : public simplify_type<const AssertingVH<Value> > {}; + +/// CallbackVH - This is a value handle that allows subclasses to define +/// callbacks that run when the underlying Value has RAUW called on it or is +/// destroyed. 
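+/// A subclass might look like this (an illustrative sketch, not part of the
+/// original header; NotifyingVH is a hypothetical name):
+///
+///   struct NotifyingVH : public CallbackVH {
+///     NotifyingVH(Value *V) : CallbackVH(V) {}
+///     virtual void deleted() {
+///       // ...react to the deletion, then drop the reference as required.
+///       setValPtr(NULL);
+///     }
+///     virtual void allUsesReplacedWith(Value *New) {
+///       // ...optionally start tracking New instead.
+///     }
+///   };
+///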
This class can be used as the key of a map, as long as the user +/// takes it out of the map before calling setValPtr() (since the map has to +/// rearrange itself when the pointer changes). Unlike ValueHandleBase, this +/// class has a vtable and a virtual destructor. +class CallbackVH : public ValueHandleBase { +protected: + CallbackVH(const CallbackVH &RHS) + : ValueHandleBase(Callback, RHS) {} + + virtual ~CallbackVH(); + + void setValPtr(Value *P) { + ValueHandleBase::operator=(P); + } + +public: + CallbackVH() : ValueHandleBase(Callback) {} + CallbackVH(Value *P) : ValueHandleBase(Callback, P) {} + + operator Value*() const { + return getValPtr(); + } + + /// Called when this->getValPtr() is destroyed, inside ~Value(), so you may + /// call any non-virtual Value method on getValPtr(), but no subclass methods. + /// If WeakVH were implemented as a CallbackVH, it would use this method to + /// call setValPtr(NULL). AssertingVH would use this method to cause an + /// assertion failure. + /// + /// All implementations must remove the reference from this object to the + /// Value that's being destroyed. + virtual void deleted() { + setValPtr(NULL); + } + + /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called, + /// _before_ any of the uses have actually been replaced. If WeakVH were + /// implemented as a CallbackVH, it would use this method to call + /// setValPtr(new_value). AssertingVH would do nothing in this method. + virtual void allUsesReplacedWith(Value *new_value) {} +}; + +// Specialize simplify_type to allow CallbackVH to participate in +// dyn_cast, isa, etc. +template<typename From> struct simplify_type; +template<> struct simplify_type<const CallbackVH> { + typedef Value* SimpleType; + static SimpleType getSimplifiedValue(const CallbackVH &CVH) { + return static_cast<Value *>(CVH); + } +}; +template<> struct simplify_type<CallbackVH> + : public simplify_type<const CallbackVH> {}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h new file mode 100644 index 0000000..b67d126 --- /dev/null +++ b/include/llvm/Support/raw_ostream.h @@ -0,0 +1,346 @@ +//===--- raw_ostream.h - Raw output stream ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the raw_ostream class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_RAW_OSTREAM_H +#define LLVM_SUPPORT_RAW_OSTREAM_H + +#include "llvm/ADT/StringExtras.h" +#include <cassert> +#include <cstring> +#include <string> +#include <iosfwd> + +namespace llvm { + class format_object_base; + template <typename T> + class SmallVectorImpl; + +/// raw_ostream - This class implements an extremely fast bulk output stream +/// that can *only* output to a stream. It does not support seeking, reopening, +/// rewinding, line buffered disciplines etc. It is a simple buffer that outputs +/// a chunk at a time. +class raw_ostream { +private: + /// The buffer is handled in such a way that the buffer is + /// uninitialized, unbuffered, or out of space when OutBufCur >= + /// OutBufEnd. Thus a single comparison suffices to determine if we + /// need to take the slow path to write a single character. 
+  ///
+  /// The buffer is in one of three states:
+  ///  1. Unbuffered (Unbuffered == true)
+  ///  2. Uninitialized (Unbuffered == false && OutBufStart == 0).
+  ///  3. Buffered (Unbuffered == false && OutBufStart != 0 &&
+  ///               OutBufEnd - OutBufStart >= 64).
+  char *OutBufStart, *OutBufEnd, *OutBufCur;
+  bool Unbuffered;
+
+public:
+  explicit raw_ostream(bool unbuffered=false) : Unbuffered(unbuffered) {
+    // Start out ready to flush.
+    OutBufStart = OutBufEnd = OutBufCur = 0;
+  }
+
+  virtual ~raw_ostream() {
+    delete [] OutBufStart;
+  }
+
+  /// tell - Return the current offset within the file.
+  uint64_t tell() { return current_pos() + GetNumBytesInBuffer(); }
+
+  //===--------------------------------------------------------------------===//
+  // Configuration Interface
+  //===--------------------------------------------------------------------===//
+
+  /// SetBufferSize - Set the internal buffer size to the specified amount
+  /// instead of the default.
+  void SetBufferSize(unsigned Size=4096) {
+    assert(Size >= 64 &&
+           "Buffer size must be somewhat large for invariants to hold");
+    flush();
+
+    delete [] OutBufStart;
+    OutBufStart = new char[Size];
+    OutBufEnd = OutBufStart+Size;
+    OutBufCur = OutBufStart;
+    Unbuffered = false;
+  }
+
+  /// SetUnbuffered - Set the stream's buffering status. When
+  /// unbuffered the stream will flush after every write. This routine
+  /// will also flush the buffer immediately when the stream is being
+  /// set to unbuffered.
+  void SetUnbuffered() {
+    flush();
+
+    delete [] OutBufStart;
+    OutBufStart = OutBufEnd = OutBufCur = 0;
+    Unbuffered = true;
+  }
+
+  unsigned GetNumBytesInBuffer() const {
+    return OutBufCur - OutBufStart;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Data Output Interface
+  //===--------------------------------------------------------------------===//
+
+  void flush() {
+    if (OutBufCur != OutBufStart)
+      flush_nonempty();
+  }
+
+  raw_ostream &operator<<(char C) {
+    if (OutBufCur >= OutBufEnd)
+      return write(C);
+    *OutBufCur++ = C;
+    return *this;
+  }
+
+  raw_ostream &operator<<(unsigned char C) {
+    if (OutBufCur >= OutBufEnd)
+      return write(C);
+    *OutBufCur++ = C;
+    return *this;
+  }
+
+  raw_ostream &operator<<(signed char C) {
+    if (OutBufCur >= OutBufEnd)
+      return write(C);
+    *OutBufCur++ = C;
+    return *this;
+  }
+
+  raw_ostream &operator<<(const char *Str) {
+    // Inline fast path, particularly for constant strings where a
+    // sufficiently smart compiler will simplify strlen.
+
+    unsigned Size = strlen(Str);
+
+    // Make sure we can use the fast path.
+    if (OutBufCur+Size > OutBufEnd)
+      return write(Str, Size);
+
+    memcpy(OutBufCur, Str, Size);
+    OutBufCur += Size;
+    return *this;
+  }
+
+  raw_ostream &operator<<(const std::string& Str) {
+    write(Str.data(), Str.length());
+    return *this;
+  }
+
+  raw_ostream &operator<<(unsigned long N);
+  raw_ostream &operator<<(long N);
+  raw_ostream &operator<<(unsigned long long N);
+  raw_ostream &operator<<(long long N);
+  raw_ostream &operator<<(const void *P);
+  raw_ostream &operator<<(unsigned int N) {
+    this->operator<<(static_cast<unsigned long>(N));
+    return *this;
+  }
+
+  raw_ostream &operator<<(int N) {
+    this->operator<<(static_cast<long>(N));
+    return *this;
+  }
+
+  raw_ostream &operator<<(double N) {
+    this->operator<<(ftostr(N));
+    return *this;
+  }
+
+  raw_ostream &write(unsigned char C);
+  raw_ostream &write(const char *Ptr, unsigned Size);
+
+  // Formatted output, see the format() function in Support/Format.h.
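+  // (Illustrative note, not in the original header: assuming Support/Format.h
+  // is included, a typical call would look like
+  //   OS << format("0x%08x", Value) << '\n';
+  // which routes through the operator declared below.)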
+  raw_ostream &operator<<(const format_object_base &Fmt);
+
+  //===--------------------------------------------------------------------===//
+  // Subclass Interface
+  //===--------------------------------------------------------------------===//
+
+private:
+  /// write_impl - This is the piece of the class that is implemented
+  /// by subclasses. This writes the \arg Size bytes starting at
+  /// \arg Ptr to the underlying stream.
+  ///
+  /// \invariant { Size > 0 }
+  virtual void write_impl(const char *Ptr, unsigned Size) = 0;
+
+  // An out of line virtual method to provide a home for the class vtable.
+  virtual void handle();
+
+  /// current_pos - Return the current position within the stream, not
+  /// counting the bytes currently in the buffer.
+  virtual uint64_t current_pos() = 0;
+
+  //===--------------------------------------------------------------------===//
+  // Private Interface
+  //===--------------------------------------------------------------------===//
+private:
+  /// flush_nonempty - Flush the current buffer, which is known to be
+  /// non-empty. This outputs the currently buffered data and resets
+  /// the buffer to empty.
+  void flush_nonempty();
+};
+
+//===----------------------------------------------------------------------===//
+// File Output Streams
+//===----------------------------------------------------------------------===//
+
+/// raw_fd_ostream - A raw_ostream that writes to a file descriptor.
+///
+class raw_fd_ostream : public raw_ostream {
+  int FD;
+  bool ShouldClose;
+  uint64_t pos;
+
+  /// write_impl - See raw_ostream::write_impl.
+  virtual void write_impl(const char *Ptr, unsigned Size);
+
+  /// current_pos - Return the current position within the stream, not
+  /// counting the bytes currently in the buffer.
+  virtual uint64_t current_pos() { return pos; }
+
+public:
+  /// raw_fd_ostream - Open the specified file for writing. If an
+  /// error occurs, information about the error is put into ErrorInfo,
+  /// and the stream should be immediately destroyed; the string will
+  /// be empty if no error occurred.
+  ///
+  /// \param Filename - The file to open. If this is "-" then the
+  /// stream will use stdout instead.
+  /// \param Binary - The file should be opened in binary mode on
+  /// platforms that support this distinction.
+  raw_fd_ostream(const char *Filename, bool Binary, std::string &ErrorInfo);
+
+  /// raw_fd_ostream ctor - FD is the file descriptor that this writes to. If
+  /// ShouldClose is true, this closes the file when the stream is destroyed.
+  raw_fd_ostream(int fd, bool shouldClose,
+                 bool unbuffered=false) : raw_ostream(unbuffered), FD(fd),
+                                          ShouldClose(shouldClose) {}
+
+  ~raw_fd_ostream();
+
+  /// close - Manually flush the stream and close the file.
+  void close();
+
+  /// tell - Return the current offset within the file.
+  uint64_t tell() { return pos + GetNumBytesInBuffer(); }
+
+  /// seek - Flushes the stream and repositions the underlying file descriptor
+  /// position to the offset specified from the beginning of the file.
+  uint64_t seek(uint64_t off);
+};
+
+/// raw_stdout_ostream - This is a stream that always prints to stdout.
+///
+class raw_stdout_ostream : public raw_fd_ostream {
+  // An out of line virtual method to provide a home for the class vtable.
+  virtual void handle();
+public:
+  raw_stdout_ostream();
+};
+
+/// raw_stderr_ostream - This is a stream that always prints to stderr.
+///
+class raw_stderr_ostream : public raw_fd_ostream {
+  // An out of line virtual method to provide a home for the class vtable.
+ virtual void handle(); +public: + raw_stderr_ostream(); +}; + +/// outs() - This returns a reference to a raw_ostream for standard output. +/// Use it like: outs() << "foo" << "bar"; +raw_ostream &outs(); + +/// errs() - This returns a reference to a raw_ostream for standard error. +/// Use it like: errs() << "foo" << "bar"; +raw_ostream &errs(); + + +//===----------------------------------------------------------------------===// +// Output Stream Adaptors +//===----------------------------------------------------------------------===// + +/// raw_os_ostream - A raw_ostream that writes to an std::ostream. This is a +/// simple adaptor class. +class raw_os_ostream : public raw_ostream { + std::ostream &OS; + + /// write_impl - See raw_ostream::write_impl. + virtual void write_impl(const char *Ptr, unsigned Size); + + /// current_pos - Return the current position within the stream, not + /// counting the bytes currently in the buffer. + virtual uint64_t current_pos(); + +public: + raw_os_ostream(std::ostream &O) : OS(O) {} + ~raw_os_ostream(); + + /// tell - Return the current offset with the stream. + uint64_t tell(); +}; + +/// raw_string_ostream - A raw_ostream that writes to an std::string. This is a +/// simple adaptor class. +class raw_string_ostream : public raw_ostream { + std::string &OS; + + /// write_impl - See raw_ostream::write_impl. + virtual void write_impl(const char *Ptr, unsigned Size); + + /// current_pos - Return the current position within the stream, not + /// counting the bytes currently in the buffer. + virtual uint64_t current_pos() { return OS.size(); } +public: + raw_string_ostream(std::string &O) : OS(O) {} + ~raw_string_ostream(); + + /// tell - Return the current offset with the stream. + uint64_t tell() { return OS.size() + GetNumBytesInBuffer(); } + + /// str - Flushes the stream contents to the target string and returns + /// the string's reference. + std::string& str() { + flush(); + return OS; + } +}; + +/// raw_svector_ostream - A raw_ostream that writes to an SmallVector or +/// SmallString. This is a simple adaptor class. +class raw_svector_ostream : public raw_ostream { + SmallVectorImpl<char> &OS; + + /// write_impl - See raw_ostream::write_impl. + virtual void write_impl(const char *Ptr, unsigned Size); + + /// current_pos - Return the current position within the stream, not + /// counting the bytes currently in the buffer. + virtual uint64_t current_pos(); +public: + raw_svector_ostream(SmallVectorImpl<char> &O) : OS(O) {} + ~raw_svector_ostream(); + + /// tell - Return the current offset with the stream. + uint64_t tell(); +}; + +} // end llvm namespace + +#endif diff --git a/include/llvm/Support/type_traits.h b/include/llvm/Support/type_traits.h new file mode 100644 index 0000000..5000a8b --- /dev/null +++ b/include/llvm/Support/type_traits.h @@ -0,0 +1,54 @@ +//===- llvm/Support/type_traits.h - Simplfied type traits -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides a template class that determines if a type is a class or +// not. The basic mechanism, based on using the pointer to member function of +// a zero argument to a function was "boosted" from the boost type_traits +// library. See http://www.boost.org/ for all the gory details. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
+#define LLVM_SUPPORT_TYPE_TRAITS_H
+
+// This is actually the conforming implementation which works with abstract
+// classes. However, enough compilers have trouble with it that most will use
+// the one in boost/type_traits/object_traits.hpp. This implementation actually
+// works with VC7.0, but other interactions seem to fail when we use it.
+
+namespace llvm {
+
+namespace dont_use
+{
+    // These two functions should never be used. They are helpers to
+    // the is_class template below. They cannot be located inside
+    // is_class because doing so causes at least GCC to think that
+    // the value of the "value" enumerator is not constant. Placing
+    // them out here (for some strange reason) allows the sizeof
+    // operator against them to magically be constant. This is
+    // important to make the is_class<T>::value idiom zero cost. It
+    // evaluates to a constant 1 or 0 depending on whether the
+    // parameter T is a class or not (respectively).
+    template<typename T> char is_class_helper(void(T::*)(void));
+    template<typename T> double is_class_helper(...);
+}
+
+template <typename T>
+struct is_class
+{
+  // is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For
+  // more details:
+  // http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
+ public:
+    enum { value = sizeof(char) == sizeof(dont_use::is_class_helper<T>(0)) };
+};
+
+}
+
+#endif
diff --git a/include/llvm/SymbolTableListTraits.h b/include/llvm/SymbolTableListTraits.h
new file mode 100644
index 0000000..337b76f
--- /dev/null
+++ b/include/llvm/SymbolTableListTraits.h
@@ -0,0 +1,79 @@
+//===-- llvm/SymbolTableListTraits.h - Traits for iplist --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic class that is used to implement the automatic
+// symbol table manipulation that occurs when you put (for example) a named
+// instruction into a basic block.
+//
+// The way that this is implemented is by using a special traits class with the
+// intrusive list that makes up the list of instructions in a basic block. When
+// a new element is added to the list of instructions, the traits class is
+// notified, allowing the symbol table to be updated.
+//
+// This generic class implements the traits class. It must be generic so that
+// it can work for everything that uses it, which includes lists of
+// instructions, basic blocks, arguments, functions, global variables, etc...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYMBOLTABLELISTTRAITS_H
+#define LLVM_SYMBOLTABLELISTTRAITS_H
+
+#include "llvm/ADT/ilist.h"
+
+namespace llvm {
+
+template<typename NodeTy> class ilist_iterator;
+template<typename NodeTy, typename Traits> class iplist;
+template<typename Ty> struct ilist_traits;
+
+// ValueSubClass   - The type of objects that I hold, e.g. Instruction.
+// ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
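+//
+// (Illustrative note, not in the original header: for the instruction list of
+// a BasicBlock this would be SymbolTableListTraits<Instruction, BasicBlock>,
+// so that inserting a named Instruction also updates the appropriate
+// ValueSymbolTable.)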
+// +template<typename ValueSubClass, typename ItemParentClass> +class SymbolTableListTraits : public ilist_default_traits<ValueSubClass> { + typedef ilist_traits<ValueSubClass> TraitsClass; +public: + SymbolTableListTraits() {} + + /// getListOwner - Return the object that owns this list. If this is a list + /// of instructions, it returns the BasicBlock that owns them. + ItemParentClass *getListOwner() { + typedef iplist<ValueSubClass> ItemParentClass::*Sublist; + Sublist Sub(ItemParentClass:: + getSublistAccess(static_cast<ValueSubClass*>(0))); + size_t Offset(size_t(&((ItemParentClass*)0->*Sub))); + iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this)); + return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)- + Offset); + } + + static iplist<ValueSubClass> &getList(ItemParentClass *Par) { + return Par->*(Par->getSublistAccess((ValueSubClass*)0)); + } + + static ValueSymbolTable *getSymTab(ItemParentClass *Par) { + return Par ? toPtr(Par->getValueSymbolTable()) : 0; + } + + void addNodeToList(ValueSubClass *V); + void removeNodeFromList(ValueSubClass *V); + void transferNodesFromList(ilist_traits<ValueSubClass> &L2, + ilist_iterator<ValueSubClass> first, + ilist_iterator<ValueSubClass> last); +//private: + template<typename TPtr> + void setSymTabObject(TPtr *, TPtr); + static ValueSymbolTable *toPtr(ValueSymbolTable *P) { return P; } + static ValueSymbolTable *toPtr(ValueSymbolTable &R) { return &R; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/System/Alarm.h b/include/llvm/System/Alarm.h new file mode 100644 index 0000000..9535d23 --- /dev/null +++ b/include/llvm/System/Alarm.h @@ -0,0 +1,50 @@ +//===- llvm/System/Alarm.h - Alarm Generation support ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides an operating system independent interface to alarm(2) +// type functionality. The Alarm class allows a one-shot alarm to be set up +// at some number of seconds in the future. When the alarm triggers, a method +// is called to process the event +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_ALARM_H +#define LLVM_SYSTEM_ALARM_H + +namespace llvm { +namespace sys { + + /// This function registers an alarm to trigger some number of \p seconds in + /// the future. When that time arrives, the AlarmStatus function will begin + /// to return 1 instead of 0. The user must poll the status of the alarm by + /// making occasional calls to AlarmStatus. If the user sends an interrupt + /// signal, AlarmStatus will begin returning -1, even if the alarm event + /// occurred. + /// @returns nothing + void SetupAlarm( + unsigned seconds ///< Number of seconds in future when alarm arrives + ); + + /// This function terminates the alarm previously set up + /// @returns nothing + void TerminateAlarm(); + + /// This function acquires the status of the alarm. + /// @returns -1=cancelled, 0=untriggered, 1=triggered + int AlarmStatus(); + + /// Sleep for n seconds. + /// @returns nothing. 
+  void Sleep(unsigned n);
+
+
+} // End sys namespace
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/System/Atomic.h b/include/llvm/System/Atomic.h
new file mode 100644
index 0000000..cb9277c
--- /dev/null
+++ b/include/llvm/System/Atomic.h
@@ -0,0 +1,30 @@
+//===- llvm/System/Atomic.h - Atomic Operations -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys atomic operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_ATOMIC_H
+#define LLVM_SYSTEM_ATOMIC_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+  namespace sys {
+    void MemoryFence();
+
+    typedef uint32_t cas_flag;
+    cas_flag CompareAndSwap(volatile cas_flag* ptr,
+                            cas_flag new_value,
+                            cas_flag old_value);
+  }
+}
+
+#endif
diff --git a/include/llvm/System/Disassembler.h b/include/llvm/System/Disassembler.h
new file mode 100644
index 0000000..d1d8a81
--- /dev/null
+++ b/include/llvm/System/Disassembler.h
@@ -0,0 +1,35 @@
+//===- llvm/System/Disassembler.h -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the necessary glue to call external disassembler
+// libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_DISASSEMBLER_H
+#define LLVM_SYSTEM_DISASSEMBLER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <string>
+
+namespace llvm {
+namespace sys {
+
+/// This function returns true if it is possible to use an external
+/// disassembler library, and false otherwise.
+bool hasDisassembler(void);
+
+/// This function provides some "glue" code to call external disassembler
+/// libraries.
+std::string disassembleBuffer(uint8_t* start, size_t length, uint64_t pc = 0);
+
+}
+}
+
+#endif // LLVM_SYSTEM_DISASSEMBLER_H
diff --git a/include/llvm/System/DynamicLibrary.h b/include/llvm/System/DynamicLibrary.h
new file mode 100644
index 0000000..409a9d2
--- /dev/null
+++ b/include/llvm/System/DynamicLibrary.h
@@ -0,0 +1,98 @@
+//===-- llvm/System/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the sys::DynamicLibrary class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_DYNAMIC_LIBRARY_H
+#define LLVM_SYSTEM_DYNAMIC_LIBRARY_H
+
+#include "llvm/System/Path.h"
+#include <string>
+
+namespace llvm {
+namespace sys {
+
+  /// This class provides a portable interface to dynamic libraries which also
+  /// might be known as shared libraries, shared objects, dynamic shared
+  /// objects, or dynamic link libraries. Regardless of the terminology or the
+  /// operating system interface, this class provides a portable interface that
+  /// allows dynamic libraries to be loaded and searched for externally
+  /// defined symbols. This is typically used to provide "plug-in" support.
+  /// It also allows for symbols to be defined which don't live in any library,
+  /// but rather the main program itself, useful on Windows where the main
+  /// executable cannot be searched.
+  class DynamicLibrary {
+    /// @name Constructors
+    /// @{
+    public:
+      /// Construct a DynamicLibrary that represents the currently executing
+      /// program. The program must have been linked with -export-dynamic or
+      /// -dlopen self for this to work.
+      /// @throws std::string indicating why the program couldn't be opened.
+      /// @brief Open program as dynamic library.
+      DynamicLibrary();
+
+      /// After destruction, the symbols of the library will no longer be
+      /// available to the program.
+      /// @brief Closes the DynamicLibrary
+      ~DynamicLibrary();
+
+    /// @}
+    /// @name Functions
+    /// @{
+    public:
+      /// This function allows a library to be loaded without instantiating a
+      /// DynamicLibrary object. Consequently, it is marked as being permanent
+      /// and will only be unloaded when the program terminates. This returns
+      /// false on success or returns true and fills in *ErrMsg on failure.
+      /// @brief Open a dynamic library permanently.
+      static bool LoadLibraryPermanently(const char* filename,
+                                         std::string *ErrMsg = 0);
+
+      /// This function will search through all previously loaded dynamic
+      /// libraries for the symbol \p symbolName. If it is found, the address
+      /// of that symbol is returned. If not, null is returned. Note that this
+      /// will search permanently loaded libraries (LoadLibraryPermanently) as
+      /// well as ephemerally loaded libraries (constructors).
+      /// @throws std::string on error.
+      /// @brief Search through libraries for address of a symbol
+      static void* SearchForAddressOfSymbol(const char* symbolName);
+
+      /// @brief Convenience function for C++ophiles.
+      static void* SearchForAddressOfSymbol(const std::string& symbolName) {
+        return SearchForAddressOfSymbol(symbolName.c_str());
+      }
+
+      /// This function permanently adds the symbol \p symbolName with the
+      /// value \p symbolValue. These symbols are searched before any
+      /// libraries.
+      /// @brief Add searchable symbol/value pair.
+      static void AddSymbol(const char* symbolName, void *symbolValue);
+
+      /// @brief Convenience function for C++ophiles.
+      static void AddSymbol(const std::string& symbolName, void *symbolValue) {
+        AddSymbol(symbolName.c_str(), symbolValue);
+      }
+
+    /// @}
+    /// @name Implementation
+    /// @{
+    protected:
+      void* handle;  // Opaque handle for information about the library
+      DynamicLibrary(const DynamicLibrary&);            ///< Do not implement
+      DynamicLibrary& operator=(const DynamicLibrary&); ///< Do not implement
+    /// @}
+  };
+
+} // End sys namespace
+} // End llvm namespace
+
+#endif // LLVM_SYSTEM_DYNAMIC_LIBRARY_H
diff --git a/include/llvm/System/Host.h b/include/llvm/System/Host.h
new file mode 100644
index 0000000..3c6aa9d
--- /dev/null
+++ b/include/llvm/System/Host.h
@@ -0,0 +1,47 @@
+//===- llvm/System/Host.h - Host machine characteristics --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// Methods for querying the nature of the host machine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_HOST_H +#define LLVM_SYSTEM_HOST_H + +#include <string> + +namespace llvm { +namespace sys { + + inline bool isLittleEndianHost() { + union { + int i; + char c; + }; + i = 1; + return c; + } + + inline bool isBigEndianHost() { + return !isLittleEndianHost(); + } + + /// getHostTriple() - Return the target triple of the running + /// system. + /// + /// The target triple is a string in the format of: + /// CPU_TYPE-VENDOR-OPERATING_SYSTEM + /// or + /// CPU_TYPE-VENDOR-KERNEL-OPERATING_SYSTEM + std::string getHostTriple(); + +} +} + +#endif diff --git a/include/llvm/System/IncludeFile.h b/include/llvm/System/IncludeFile.h new file mode 100644 index 0000000..3268ea2 --- /dev/null +++ b/include/llvm/System/IncludeFile.h @@ -0,0 +1,79 @@ +//===- llvm/System/IncludeFile.h - Ensure Linking Of Library ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the FORCE_DEFINING_FILE_TO_BE_LINKED and DEFINE_FILE_FOR +// macros. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_INCLUDEFILE_H +#define LLVM_SYSTEM_INCLUDEFILE_H + +/// This macro is the public interface that IncludeFile.h exports. This gives +/// us the option to implement the "link the definition" capability in any +/// manner that we choose. All header files that depend on a specific .cpp +/// file being linked at run time should use this macro instead of the +/// IncludeFile class directly. +/// +/// For example, foo.h would use:<br/> +/// <tt>FORCE_DEFINING_FILE_TO_BE_LINKED(foo)</tt><br/> +/// +/// And, foo.cp would use:<br/> +/// <tt>DEFINING_FILE_FOR(foo)</tt><br/> +#ifdef __GNUC__ +// If the `used' attribute is available, use it to create a variable +// with an initializer that will force the linking of the defining file. +#define FORCE_DEFINING_FILE_TO_BE_LINKED(name) \ + namespace llvm { \ + extern const char name ## LinkVar; \ + __attribute__((used)) static const char *const name ## LinkObj = \ + &name ## LinkVar; \ + } +#else +// Otherwise use a constructor call. +#define FORCE_DEFINING_FILE_TO_BE_LINKED(name) \ + namespace llvm { \ + extern const char name ## LinkVar; \ + static const IncludeFile name ## LinkObj ( &name ## LinkVar ); \ + } +#endif + +/// This macro is the counterpart to FORCE_DEFINING_FILE_TO_BE_LINKED. It should +/// be used in a .cpp file to define the name referenced in a header file that +/// will cause linkage of the .cpp file. It should only be used at extern level. +#define DEFINING_FILE_FOR(name) \ + namespace llvm { const char name ## LinkVar = 0; } + +namespace llvm { + +/// This class is used in the implementation of FORCE_DEFINING_FILE_TO_BE_LINKED +/// macro to make sure that the implementation of a header file is included +/// into a tool that uses the header. This is solely +/// to overcome problems linking .a files and not getting the implementation +/// of compilation units we need. This is commonly an issue with the various +/// Passes but also occurs elsewhere in LLVM. 
We like to use .a files because +/// they link faster and provide the smallest executables. However, sometimes +/// those executables are too small, if the program doesn't reference something +/// that might be needed, especially by a loaded share object. This little class +/// helps to resolve that problem. The basic strategy is to use this class in +/// a header file and pass the address of a variable to the constructor. If the +/// variable is defined in the header file's corresponding .cpp file then all +/// tools/libraries that \#include the header file will require the .cpp as +/// well. +/// For example:<br/> +/// <tt>extern int LinkMyCodeStub;</tt><br/> +/// <tt>static IncludeFile LinkMyModule(&LinkMyCodeStub);</tt><br/> +/// @brief Class to ensure linking of corresponding object file. +struct IncludeFile { + explicit IncludeFile(const void *); +}; + +} + +#endif diff --git a/include/llvm/System/LICENSE.TXT b/include/llvm/System/LICENSE.TXT new file mode 100644 index 0000000..f569da2 --- /dev/null +++ b/include/llvm/System/LICENSE.TXT @@ -0,0 +1,6 @@ +LLVM System Interface Library +------------------------------------------------------------------------------- +The LLVM System Interface Library is licensed under the Illinois Open Source +License and has the following additional copyright: + +Copyright (C) 2004 eXtensible Systems, Inc. diff --git a/include/llvm/System/Memory.h b/include/llvm/System/Memory.h new file mode 100644 index 0000000..136dc8a --- /dev/null +++ b/include/llvm/System/Memory.h @@ -0,0 +1,94 @@ +//===- llvm/System/Memory.h - Memory Support --------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the llvm::sys::Memory class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_MEMORY_H +#define LLVM_SYSTEM_MEMORY_H + +#include <string> + +namespace llvm { +namespace sys { + + /// This class encapsulates the notion of a memory block which has an address + /// and a size. It is used by the Memory class (a friend) as the result of + /// various memory allocation operations. + /// @see Memory + /// @brief Memory block abstraction. + class MemoryBlock { + public: + void *base() const { return Address; } + unsigned size() const { return Size; } + private: + void *Address; ///< Address of first byte of memory area + unsigned Size; ///< Size, in bytes of the memory area + friend class Memory; + }; + + /// This class provides various memory handling functions that manipulate + /// MemoryBlock instances. + /// @since 1.4 + /// @brief An abstraction for memory operations. + class Memory { + public: + /// This method allocates a block of Read/Write/Execute memory that is + /// suitable for executing dynamically generated code (e.g. JIT). An + /// attempt to allocate \p NumBytes bytes of virtual memory is made. + /// \p NearBlock may point to an existing allocation in which case + /// an attempt is made to allocate more memory near the existing block. + /// + /// On success, this returns a non-null memory block, otherwise it returns + /// a null memory block and fills in *ErrMsg. + /// + /// @brief Allocate Read/Write/Execute memory. 
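+    /// An illustrative sketch of a call site (not part of the original
+    /// header):
+    ///   std::string Err;
+    ///   MemoryBlock MB = Memory::AllocateRWX(4096, 0, &Err);
+    ///   if (MB.base() == 0) { /* allocation failed; Err says why */ }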
+ static MemoryBlock AllocateRWX(unsigned NumBytes, + const MemoryBlock *NearBlock, + std::string *ErrMsg = 0); + + /// This method releases a block of Read/Write/Execute memory that was + /// allocated with the AllocateRWX method. It should not be used to + /// release any memory block allocated any other way. + /// + /// On success, this returns false, otherwise it returns true and fills + /// in *ErrMsg. + /// @throws std::string if an error occurred. + /// @brief Release Read/Write/Execute memory. + static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = 0); + + + /// InvalidateInstructionCache - Before the JIT can run a block of code + /// that has been emitted it must invalidate the instruction cache on some + /// platforms. + static void InvalidateInstructionCache(const void *Addr, size_t Len); + + /// setExecutable - Before the JIT can run a block of code, it has to be + /// given read and executable privilege. Return true if it is already r-x + /// or the system is able to change its previlege. + static bool setExecutable (MemoryBlock &M, std::string *ErrMsg = 0); + + /// setWritable - When adding to a block of code, the JIT may need + /// to mark a block of code as RW since the protections are on page + /// boundaries, and the JIT internal allocations are not page aligned. + static bool setWritable (MemoryBlock &M, std::string *ErrMsg = 0); + + /// setRangeExecutable - Mark the page containing a range of addresses + /// as executable. + static bool setRangeExecutable(const void *Addr, size_t Size); + + /// setRangeWritable - Mark the page containing a range of addresses + /// as writable. + static bool setRangeWritable(const void *Addr, size_t Size); + }; +} +} + +#endif diff --git a/include/llvm/System/Mutex.h b/include/llvm/System/Mutex.h new file mode 100644 index 0000000..4f38493 --- /dev/null +++ b/include/llvm/System/Mutex.h @@ -0,0 +1,84 @@ +//===- llvm/System/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the llvm::sys::Mutex class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_MUTEX_H +#define LLVM_SYSTEM_MUTEX_H + +namespace llvm +{ + namespace sys + { + /// @brief Platform agnostic Mutex class. + class Mutex + { + /// @name Constructors + /// @{ + public: + + /// Initializes the lock but doesn't acquire it. if \p recursive is set + /// to false, the lock will not be recursive which makes it cheaper but + /// also more likely to deadlock (same thread can't acquire more than + /// once). + /// @brief Default Constructor. + explicit Mutex(bool recursive = true); + + /// Releases and removes the lock + /// @brief Destructor + ~Mutex(); + + /// @} + /// @name Methods + /// @{ + public: + + /// Attempts to unconditionally acquire the lock. If the lock is held by + /// another thread, this method will wait until it can acquire the lock. + /// @returns false if any kind of error occurs, true otherwise. + /// @brief Unconditionally acquire the lock. + bool acquire(); + + /// Attempts to release the lock. If the lock is held by the current + /// thread, the lock is released allowing other threads to acquire the + /// lock. + /// @returns false if any kind of error occurs, true otherwise. 
+ /// @brief Unconditionally release the lock. + bool release(); + + /// Attempts to acquire the lock without blocking. If the lock is not + /// available, this function returns false quickly (without blocking). If + /// the lock is available, it is acquired. + /// @returns false if any kind of error occurs or the lock is not + /// available, true otherwise. + /// @brief Try to acquire the lock. + bool tryacquire(); + + //@} + /// @name Platform Dependent Data + /// @{ + private: +#ifdef ENABLE_THREADS + void* data_; ///< We don't know what the data will be +#endif + + /// @} + /// @name Do Not Implement + /// @{ + private: + Mutex(const Mutex & original); + void operator=(const Mutex &); + /// @} + }; + } +} + +#endif diff --git a/include/llvm/System/Path.h b/include/llvm/System/Path.h new file mode 100644 index 0000000..de2f173 --- /dev/null +++ b/include/llvm/System/Path.h @@ -0,0 +1,721 @@ +//===- llvm/System/Path.h - Path Operating System Concept -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the llvm::sys::Path class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_PATH_H +#define LLVM_SYSTEM_PATH_H + +#include "llvm/System/TimeValue.h" +#include <set> +#include <string> +#include <vector> +#include <iosfwd> + +namespace llvm { +namespace sys { + + /// This structure provides basic file system information about a file. It + /// is patterned after the stat(2) Unix operating system call but made + /// platform independent and eliminates many of the unix-specific fields. + /// However, to support llvm-ar, the mode, user, and group fields are + /// retained. These pertain to unix security and may not have a meaningful + /// value on non-Unix platforms. However, the other fields fields should + /// always be applicable on all platforms. The structure is filled in by + /// the PathWithStatus class. + /// @brief File status structure + class FileStatus { + public: + uint64_t fileSize; ///< Size of the file in bytes + TimeValue modTime; ///< Time of file's modification + uint32_t mode; ///< Mode of the file, if applicable + uint32_t user; ///< User ID of owner, if applicable + uint32_t group; ///< Group ID of owner, if applicable + uint64_t uniqueID; ///< A number to uniquely ID this file + bool isDir : 1; ///< True if this is a directory. + bool isFile : 1; ///< True if this is a file. + + FileStatus() : fileSize(0), modTime(0,0), mode(0777), user(999), + group(999), uniqueID(0), isDir(false), isFile(false) { } + + TimeValue getTimestamp() const { return modTime; } + uint64_t getSize() const { return fileSize; } + uint32_t getMode() const { return mode; } + uint32_t getUser() const { return user; } + uint32_t getGroup() const { return group; } + uint64_t getUniqueID() const { return uniqueID; } + }; + + /// This class provides an abstraction for the path to a file or directory + /// in the operating system's filesystem and provides various basic operations + /// on it. Note that this class only represents the name of a path to a file + /// or directory which may or may not be valid for a given machine's file + /// system. The class is patterned after the java.io.File class with various + /// extensions and several omissions (not relevant to LLVM). 
A Path object + /// ensures that the path it encapsulates is syntactically valid for the + /// operating system it is running on but does not ensure correctness for + /// any particular file system. That is, a syntactically valid path might + /// specify path components that do not exist in the file system and using + /// such a Path to act on the file system could produce errors. There is one + /// invalid Path value which is permitted: the empty path. The class should + /// never allow a syntactically invalid non-empty path name to be assigned. + /// Empty paths are required in order to indicate an error result in some + /// situations. If the path is empty, the isValid operation will return + /// false. All operations will fail if isValid is false. Operations that + /// change the path will either return false if it would cause a syntactically + /// invalid path name (in which case the Path object is left unchanged) or + /// throw an std::string exception indicating the error. The methods are + /// grouped into four basic categories: Path Accessors (provide information + /// about the path without accessing disk), Disk Accessors (provide + /// information about the underlying file or directory), Path Mutators + /// (change the path information, not the disk), and Disk Mutators (change + /// the disk file/directory referenced by the path). The Disk Mutator methods + /// all have the word "disk" embedded in their method name to reinforce the + /// notion that the operation modifies the file system. + /// @since 1.4 + /// @brief An abstraction for operating system paths. + class Path { + /// @name Constructors + /// @{ + public: + /// Construct a path to the root directory of the file system. The root + /// directory is a top level directory above which there are no more + /// directories. For example, on UNIX, the root directory is /. On Windows + /// it is C:\. Other operating systems may have different notions of + /// what the root directory is or none at all. In that case, a consistent + /// default root directory will be used. + static Path GetRootDirectory(); + + /// Construct a path to a unique temporary directory that is created in + /// a "standard" place for the operating system. The directory is + /// guaranteed to be created on exit from this function. If the directory + /// cannot be created, the function will throw an exception. + /// @returns an invalid path (empty) on error + /// @param ErrMsg Optional place for an error message if an error occurs + /// @brief Constrct a path to an new, unique, existing temporary + /// directory. + static Path GetTemporaryDirectory(std::string* ErrMsg = 0); + + /// Construct a vector of sys::Path that contains the "standard" system + /// library paths suitable for linking into programs. This function *must* + /// return the value of LLVM_LIB_SEARCH_PATH as the first item in \p Paths + /// if that environment variable is set and it references a directory. + /// @brief Construct a path to the system library directory + static void GetSystemLibraryPaths(std::vector<sys::Path>& Paths); + + /// Construct a vector of sys::Path that contains the "standard" bitcode + /// library paths suitable for linking into an llvm program. This function + /// *must* return the value of LLVM_LIB_SEARCH_PATH as well as the value + /// of LLVM_LIBDIR. It also must provide the System library paths as + /// returned by GetSystemLibraryPaths. + /// @see GetSystemLibraryPaths + /// @brief Construct a list of directories in which bitcode could be + /// found. 
+ static void GetBitcodeLibraryPaths(std::vector<sys::Path>& Paths); + + /// Find the path to a library using its short name. Use the system + /// dependent library paths to locate the library. + /// @brief Find a library. + static Path FindLibrary(std::string& short_name); + + /// Construct a path to the default LLVM configuration directory. The + /// implementation must ensure that this is a well-known (same on many + /// systems) directory in which llvm configuration files exist. For + /// example, on Unix, the /etc/llvm directory has been selected. + /// @brief Construct a path to the default LLVM configuration directory + static Path GetLLVMDefaultConfigDir(); + + /// Construct a path to the LLVM installed configuration directory. The + /// implementation must ensure that this refers to the "etc" directory of + /// the LLVM installation. This is the location where configuration files + /// will be located for a particular installation of LLVM on a machine. + /// @brief Construct a path to the LLVM installed configuration directory + static Path GetLLVMConfigDir(); + + /// Construct a path to the current user's home directory. The + /// implementation must use an operating system specific mechanism for + /// determining the user's home directory. For example, the environment + /// variable "HOME" could be used on Unix. If a given operating system + /// does not have the concept of a user's home directory, this static + /// constructor must provide the same result as GetRootDirectory. + /// @brief Construct a path to the current user's "home" directory + static Path GetUserHomeDirectory(); + + /// Construct a path to the current directory for the current process. + /// @returns The current working directory. + /// @brief Returns the current working directory. + static Path GetCurrentDirectory(); + + /// Return the suffix commonly used on file names that contain a shared + /// object, shared archive, or dynamic link library. Such files are + /// linked at runtime into a process and their code images are shared + /// between processes. + /// @returns The dynamic link library suffix for the current platform. + /// @brief Return the dynamic link library suffix. + static std::string GetDLLSuffix(); + + /// GetMainExecutable - Return the path to the main executable, given the + /// value of argv[0] from program startup and the address of main itself. + static Path GetMainExecutable(const char *argv0, void *MainAddr); + + /// This is one of the very few ways in which a path can be constructed + /// with a syntactically invalid name. The only *legal* invalid name is an + /// empty one. Other invalid names are not permitted. Empty paths are + /// provided so that they can be used to indicate null or error results in + /// other lib/System functionality. + /// @brief Construct an empty (and invalid) path. + Path() : path() {} + Path(const Path &that) : path(that.path) {} + + /// This constructor will accept a std::string as a path. No checking is + /// done on this path to determine if it is valid. To determine validity + /// of the path, use the isValid method. + /// @param p The path to assign. + /// @brief Construct a Path from a string. + explicit Path(const std::string& p); + + /// This constructor will accept a character range as a path. No checking + /// is done on this path to determine if it is valid. To determine + /// validity of the path, use the isValid method. 
+ /// @param StrStart A pointer to the first character of the path name + /// @param StrLen The length of the path name at StrStart + /// @brief Construct a Path from a string. + Path(const char *StrStart, unsigned StrLen); + + /// @} + /// @name Operators + /// @{ + public: + /// Makes a copy of \p that to \p this. + /// @returns \p this + /// @brief Assignment Operator + Path &operator=(const Path &that) { + path = that.path; + return *this; + } + + /// Makes a copy of \p that to \p this. + /// @param \p that A std::string denoting the path + /// @returns \p this + /// @brief Assignment Operator + Path &operator=(const std::string &that); + + /// Compares \p this Path with \p that Path for equality. + /// @returns true if \p this and \p that refer to the same thing. + /// @brief Equality Operator + bool operator==(const Path &that) const; + + /// Compares \p this Path with \p that Path for inequality. + /// @returns true if \p this and \p that refer to different things. + /// @brief Inequality Operator + bool operator!=(const Path &that) const; + + /// Determines if \p this Path is less than \p that Path. This is required + /// so that Path objects can be placed into ordered collections (e.g. + /// std::map). The comparison is done lexicographically as defined by + /// the std::string::compare method. + /// @returns true if \p this path is lexicographically less than \p that. + /// @brief Less Than Operator + bool operator<(const Path& that) const; + + /// @} + /// @name Path Accessors + /// @{ + public: + /// This function will use an operating system specific algorithm to + /// determine if the current value of \p this is a syntactically valid + /// path name for the operating system. The path name does not need to + /// exist, validity is simply syntactical. Empty paths are always invalid. + /// @returns true iff the path name is syntactically legal for the + /// host operating system. + /// @brief Determine if a path is syntactically valid or not. + bool isValid() const; + + /// This function determines if the contents of the path name are empty. + /// That is, the path name has a zero length. This does NOT determine if + /// if the file is empty. To get the length of the file itself, Use the + /// PathWithStatus::getFileStatus() method and then the getSize() method + /// on the returned FileStatus object. + /// @returns true iff the path is empty. + /// @brief Determines if the path name is empty (invalid). + bool isEmpty() const { return path.empty(); } + + /// This function returns the current contents of the path as a + /// std::string. This allows the underlying path string to be manipulated. + /// @returns std::string containing the path name. + /// @brief Returns the path as a std::string. + const std::string &toString() const { return path; } + + /// This function returns the last component of the path name. The last + /// component is the file or directory name occuring after the last + /// directory separator. If no directory separator is present, the entire + /// path name is returned (i.e. same as toString). + /// @returns std::string containing the last component of the path name. + /// @brief Returns the last component of the path name. + std::string getLast() const; + + /// This function strips off the path and suffix of the file or directory + /// name and returns just the basename. For example /a/foo.bar would cause + /// this function to return "foo". 
+ /// @returns std::string containing the basename of the path + /// @brief Get the base name of the path + std::string getBasename() const; + + /// This function strips off the suffix of the path beginning with the + /// path separator ('/' on Unix, '\' on Windows) and returns the result. + std::string getDirname() const; + + /// This function strips off the path and basename(up to and + /// including the last dot) of the file or directory name and + /// returns just the suffix. For example /a/foo.bar would cause + /// this function to return "bar". + /// @returns std::string containing the suffix of the path + /// @brief Get the suffix of the path + std::string getSuffix() const; + + /// Obtain a 'C' string for the path name. + /// @returns a 'C' string containing the path name. + /// @brief Returns the path as a C string. + const char *c_str() const { return path.c_str(); } + + /// size - Return the length in bytes of this path name. + size_t size() const { return path.size(); } + + /// empty - Returns true if the path is empty. + unsigned empty() const { return path.empty(); } + + /// @} + /// @name Disk Accessors + /// @{ + public: + /// This function determines if the path name in this object references + /// the root (top level directory) of the file system. The details of what + /// is considered the "root" may vary from system to system so this method + /// will do the necessary checking. + /// @returns true iff the path name references the root directory. + /// @brief Determines if the path references the root directory. + bool isRootDirectory() const; + + /// This function determines if the path name is absolute, as opposed to + /// relative. + /// @brief Determine if the path is absolute. + bool isAbsolute() const; + + /// This function opens the file associated with the path name provided by + /// the Path object and reads its magic number. If the magic number at the + /// start of the file matches \p magic, true is returned. In all other + /// cases (file not found, file not accessible, etc.) it returns false. + /// @returns true if the magic number of the file matches \p magic. + /// @brief Determine if file has a specific magic number + bool hasMagicNumber(const std::string& magic) const; + + /// This function retrieves the first \p len bytes of the file associated + /// with \p this. These bytes are returned as the "magic number" in the + /// \p Magic parameter. + /// @returns true if the Path is a file and the magic number is retrieved, + /// false otherwise. + /// @brief Get the file's magic number. + bool getMagicNumber(std::string& Magic, unsigned len) const; + + /// This function determines if the path name in the object references an + /// archive file by looking at its magic number. + /// @returns true if the file starts with the magic number for an archive + /// file. + /// @brief Determine if the path references an archive file. + bool isArchive() const; + + /// This function determines if the path name in the object references an + /// LLVM Bitcode file by looking at its magic number. + /// @returns true if the file starts with the magic number for LLVM + /// bitcode files. + /// @brief Determine if the path references a bitcode file. + bool isBitcodeFile() const; + + /// This function determines if the path name in the object references a + /// native Dynamic Library (shared library, shared object) by looking at + /// the file's magic number. The Path object must reference a file, not a + /// directory. 
+ /// @return strue if the file starts with the magid number for a native + /// shared library. + /// @brief Determine if the path reference a dynamic library. + bool isDynamicLibrary() const; + + /// This function determines if the path name references an existing file + /// or directory in the file system. + /// @returns true if the pathname references an existing file or + /// directory. + /// @brief Determines if the path is a file or directory in + /// the file system. + bool exists() const; + + /// This function determines if the path name refences an + /// existing directory. + /// @returns true if the pathname references an existing directory. + /// @brief Determins if the path is a directory in the file system. + bool isDirectory() const; + + /// This function determines if the path name references a readable file + /// or directory in the file system. This function checks for + /// the existence and readability (by the current program) of the file + /// or directory. + /// @returns true if the pathname references a readable file. + /// @brief Determines if the path is a readable file or directory + /// in the file system. + bool canRead() const; + + /// This function determines if the path name references a writable file + /// or directory in the file system. This function checks for the + /// existence and writability (by the current program) of the file or + /// directory. + /// @returns true if the pathname references a writable file. + /// @brief Determines if the path is a writable file or directory + /// in the file system. + bool canWrite() const; + + /// This function determines if the path name references an executable + /// file in the file system. This function checks for the existence and + /// executability (by the current program) of the file. + /// @returns true if the pathname references an executable file. + /// @brief Determines if the path is an executable file in the file + /// system. + bool canExecute() const; + + /// This function builds a list of paths that are the names of the + /// files and directories in a directory. + /// @returns true if an error occurs, true otherwise + /// @brief Build a list of directory's contents. + bool getDirectoryContents( + std::set<Path> &paths, ///< The resulting list of file & directory names + std::string* ErrMsg ///< Optional place to return an error message. + ) const; + + /// @} + /// @name Path Mutators + /// @{ + public: + /// The path name is cleared and becomes empty. This is an invalid + /// path name but is the *only* invalid path name. This is provided + /// so that path objects can be used to indicate the lack of a + /// valid path being found. + /// @brief Make the path empty. + void clear() { path.clear(); } + + /// This method sets the Path object to \p unverified_path. This can fail + /// if the \p unverified_path does not pass the syntactic checks of the + /// isValid() method. If verification fails, the Path object remains + /// unchanged and false is returned. Otherwise true is returned and the + /// Path object takes on the path value of \p unverified_path + /// @returns true if the path was set, false otherwise. + /// @param unverified_path The path to be set in Path object. + /// @brief Set a full path from a std::string + bool set(const std::string& unverified_path); + + /// One path component is removed from the Path. If only one component is + /// present in the path, the Path object becomes empty. If the Path object + /// is empty, no change is made. 
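Usage sketch (not part of the patch): combining the disk accessors above to probe a caller-supplied name; the function name is a made-up example.

    #include "llvm/System/Path.h"
    #include <iostream>

    void describe(const std::string &Name) {
      llvm::sys::Path P;
      if (!P.set(Name)) {                       // syntactically invalid path name
        std::cerr << Name << " is not a valid path\n";
        return;
      }
      if (!P.exists())
        std::cout << P.toString() << " does not exist\n";
      else if (P.isDirectory())
        std::cout << P.toString() << " is a directory\n";
      else if (P.canRead() && P.isBitcodeFile())
        std::cout << P.toString() << " is a readable bitcode file\n";
    }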
+ /// @returns false if the path component could not be removed. + /// @brief Removes the last directory component of the Path. + bool eraseComponent(); + + /// The \p component is added to the end of the Path if it is a legal + /// name for the operating system. A directory separator will be added if + /// needed. + /// @returns false if the path component could not be added. + /// @brief Appends one path component to the Path. + bool appendComponent( const std::string& component ); + + /// A period and the \p suffix are appended to the end of the pathname. + /// The precondition for this function is that the Path reference a file + /// name (i.e. isFile() returns true). If the Path is not a file, no + /// action is taken and the function returns false. If the path would + /// become invalid for the host operating system, false is returned. + /// @returns false if the suffix could not be added, true if it was. + /// @brief Adds a period and the \p suffix to the end of the pathname. + bool appendSuffix(const std::string& suffix); + + /// The suffix of the filename is erased. The suffix begins with and + /// includes the last . character in the filename after the last directory + /// separator and extends until the end of the name. If no . character is + /// after the last directory separator, then the file name is left + /// unchanged (i.e. it was already without a suffix) but the function + /// returns false. + /// @returns false if there was no suffix to remove, true otherwise. + /// @brief Remove the suffix from a path name. + bool eraseSuffix(); + + /// The current Path name is made unique in the file system. Upon return, + /// the Path will have been changed to make a unique file in the file + /// system or it will not have been changed if the current path name is + /// already unique. + /// @throws std::string if an unrecoverable error occurs. + /// @brief Make the current path name unique in the file system. + bool makeUnique( bool reuse_current /*= true*/, std::string* ErrMsg ); + + /// The current Path name is made absolute by prepending the + /// current working directory if necessary. + void makeAbsolute(); + + /// @} + /// @name Disk Mutators + /// @{ + public: + /// This method attempts to make the file referenced by the Path object + /// available for reading so that the canRead() method will return true. + /// @brief Make the file readable; + bool makeReadableOnDisk(std::string* ErrMsg = 0); + + /// This method attempts to make the file referenced by the Path object + /// available for writing so that the canWrite() method will return true. + /// @brief Make the file writable; + bool makeWriteableOnDisk(std::string* ErrMsg = 0); + + /// This method attempts to make the file referenced by the Path object + /// available for execution so that the canExecute() method will return + /// true. + /// @brief Make the file readable; + bool makeExecutableOnDisk(std::string* ErrMsg = 0); + + /// This method allows the last modified time stamp and permission bits + /// to be set on the disk object referenced by the Path. + /// @throws std::string if an error occurs. + /// @returns true on error. + /// @brief Set the status information. + bool setStatusInfoOnDisk(const FileStatus &SI, + std::string *ErrStr = 0) const; + + /// This method attempts to create a directory in the file system with the + /// same name as the Path object. The \p create_parents parameter controls + /// whether intermediate directories are created or not. 
if \p + /// create_parents is true, then an attempt will be made to create all + /// intermediate directories, as needed. If \p create_parents is false, + /// then only the final directory component of the Path name will be + /// created. The created directory will have no entries. + /// @returns true if the directory could not be created, false otherwise + /// @brief Create the directory this Path refers to. + bool createDirectoryOnDisk( + bool create_parents = false, ///< Determines whether non-existent + ///< directory components other than the last one (the "parents") + ///< are created or not. + std::string* ErrMsg = 0 ///< Optional place to put error messages. + ); + + /// This method attempts to create a file in the file system with the same + /// name as the Path object. The intermediate directories must all exist + /// at the time this method is called. Use createDirectoriesOnDisk to + /// accomplish that. The created file will be empty upon return from this + /// function. + /// @returns true if the file could not be created, false otherwise. + /// @brief Create the file this Path refers to. + bool createFileOnDisk( + std::string* ErrMsg = 0 ///< Optional place to put error messages. + ); + + /// This is like createFile except that it creates a temporary file. A + /// unique temporary file name is generated based on the contents of + /// \p this before the call. The new name is assigned to \p this and the + /// file is created. Note that this will both change the Path object + /// *and* create the corresponding file. This function will ensure that + /// the newly generated temporary file name is unique in the file system. + /// @returns true if the file couldn't be created, false otherwise. + /// @brief Create a unique temporary file + bool createTemporaryFileOnDisk( + bool reuse_current = false, ///< When set to true, this parameter + ///< indicates that if the current file name does not exist then + ///< it will be used without modification. + std::string* ErrMsg = 0 ///< Optional place to put error messages + ); + + /// This method renames the file referenced by \p this as \p newName. The + /// file referenced by \p this must exist. The file referenced by + /// \p newName does not need to exist. + /// @returns true on error, false otherwise + /// @brief Rename one file as another. + bool renamePathOnDisk(const Path& newName, std::string* ErrMsg); + + /// This method attempts to destroy the file or directory named by the + /// last component of the Path. If the Path refers to a directory and the + /// \p destroy_contents is false, an attempt will be made to remove just + /// the directory (the final Path component). If \p destroy_contents is + /// true, an attempt will be made to remove the entire contents of the + /// directory, recursively. If the Path refers to a file, the + /// \p destroy_contents parameter is ignored. + /// @param destroy_contents Indicates whether the contents of a destroyed + /// @param Err An optional string to receive an error message. + /// directory should also be destroyed (recursively). + /// @returns false if the file/directory was destroyed, true on error. + /// @brief Removes the file or directory from the filesystem. + bool eraseFromDisk(bool destroy_contents = false, + std::string *Err = 0) const; + + + /// MapInFilePages - This is a low level system API to map in the file + /// that is currently opened as FD into the current processes' address + /// space for read only access. 
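Usage sketch (not part of the patch): the disk mutators above report failure by returning true, so a temporary file can be created and removed like this (the base name is hypothetical).

    #include "llvm/System/Path.h"
    #include <iostream>

    void writeScratchFile() {
      std::string ErrMsg;
      llvm::sys::Path Scratch("output");                       // base name for the unique file
      if (Scratch.createTemporaryFileOnDisk(false, &ErrMsg)) { // true means error
        std::cerr << "error: " << ErrMsg << "\n";
        return;
      }
      // ... write to Scratch.c_str() with ordinary file I/O ...
      if (Scratch.eraseFromDisk(false, &ErrMsg))               // clean up again
        std::cerr << "error: " << ErrMsg << "\n";
    }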
This function may return null on failure + /// or if the system cannot provide the following constraints: + /// 1) The pages must be valid after the FD is closed, until + /// UnMapFilePages is called. + /// 2) Any padding after the end of the file must be zero filled, if + /// present. + /// 3) The pages must be contiguous. + /// + /// This API is not intended for general use, clients should use + /// MemoryBuffer::getFile instead. + static const char *MapInFilePages(int FD, uint64_t FileSize); + + /// UnMapFilePages - Free pages mapped into the current process by + /// MapInFilePages. + /// + /// This API is not intended for general use, clients should use + /// MemoryBuffer::getFile instead. + static void UnMapFilePages(const char *Base, uint64_t FileSize); + + /// @} + /// @name Data + /// @{ + protected: + mutable std::string path; ///< Storage for the path name. + + + /// @} + }; + + /// This class is identical to Path class except it allows you to obtain the + /// file status of the Path as well. The reason for the distinction is one of + /// efficiency. First, the file status requires additional space and the space + /// is incorporated directly into PathWithStatus without an additional malloc. + /// Second, obtaining status information is an expensive operation on most + /// operating systems so we want to be careful and explicity about where we + /// allow this operation in LLVM. + /// @brief Path with file status class. + class PathWithStatus : public Path { + /// @name Constructors + /// @{ + public: + /// @brief Default constructor + PathWithStatus() : Path(), status(), fsIsValid(false) {} + + /// @brief Copy constructor + PathWithStatus(const PathWithStatus &that) + : Path(static_cast<const Path&>(that)), status(that.status), + fsIsValid(that.fsIsValid) {} + + /// This constructor allows construction from a Path object + /// @brief Path constructor + PathWithStatus(const Path &other) + : Path(other), status(), fsIsValid(false) {} + + /// This constructor will accept a std::string as a path. No checking is + /// done on this path to determine if it is valid. To determine validity + /// of the path, use the isValid method. + /// @brief Construct a Path from a string. + explicit PathWithStatus( + const std::string& p ///< The path to assign. + ) : Path(p), status(), fsIsValid(false) {} + + /// This constructor will accept a character range as a path. No checking + /// is done on this path to determine if it is valid. To determine + /// validity of the path, use the isValid method. + /// @brief Construct a Path from a string. + explicit PathWithStatus( + const char *StrStart, ///< Pointer to the first character of the path + unsigned StrLen ///< Length of the path. + ) : Path(StrStart, StrLen), status(), fsIsValid(false) {} + + /// Makes a copy of \p that to \p this. + /// @returns \p this + /// @brief Assignment Operator + PathWithStatus &operator=(const PathWithStatus &that) { + static_cast<Path&>(*this) = static_cast<const Path&>(that); + status = that.status; + fsIsValid = that.fsIsValid; + return *this; + } + + /// Makes a copy of \p that to \p this. + /// @returns \p this + /// @brief Assignment Operator + PathWithStatus &operator=(const Path &that) { + static_cast<Path&>(*this) = static_cast<const Path&>(that); + fsIsValid = false; + return *this; + } + + /// @} + /// @name Methods + /// @{ + public: + /// This function returns status information about the file. The type of + /// path (file or directory) is updated to reflect the actual contents + /// of the file system. 
+ /// @returns 0 on failure, with Error explaining why (if non-zero) + /// @returns a pointer to a FileStatus structure on success. + /// @brief Get file status. + const FileStatus *getFileStatus( + bool forceUpdate = false, ///< Force an update from the file system + std::string *Error = 0 ///< Optional place to return an error msg. + ) const; + + /// @} + /// @name Data + /// @{ + private: + mutable FileStatus status; ///< Status information. + mutable bool fsIsValid; ///< Whether we've obtained it or not + + /// @} + }; + + /// This enumeration delineates the kinds of files that LLVM knows about. + enum LLVMFileType { + Unknown_FileType = 0, ///< Unrecognized file + Bitcode_FileType, ///< Bitcode file + Archive_FileType, ///< ar style archive file + ELF_Relocatable_FileType, ///< ELF Relocatable object file + ELF_Executable_FileType, ///< ELF Executable image + ELF_SharedObject_FileType, ///< ELF dynamically linked shared lib + ELF_Core_FileType, ///< ELF core image + Mach_O_Object_FileType, ///< Mach-O Object file + Mach_O_Executable_FileType, ///< Mach-O Executable + Mach_O_FixedVirtualMemorySharedLib_FileType, ///< Mach-O Shared Lib, FVM + Mach_O_Core_FileType, ///< Mach-O Core File + Mach_O_PreloadExectuable_FileType, ///< Mach-O Preloaded Executable + Mach_O_DynamicallyLinkedSharedLib_FileType, ///< Mach-O dynlinked shared lib + Mach_O_DynamicLinker_FileType, ///< The Mach-O dynamic linker + Mach_O_Bundle_FileType, ///< Mach-O Bundle file + Mach_O_DynamicallyLinkedSharedLibStub_FileType, ///< Mach-O Shared lib stub + COFF_FileType ///< COFF object file or lib + }; + + /// This utility function allows any memory block to be examined in order + /// to determine its file type. + LLVMFileType IdentifyFileType(const char*magic, unsigned length); + + /// This function can be used to copy the file specified by Src to the + /// file specified by Dest. If an error occurs, Dest is removed. + /// @returns true if an error occurs, false otherwise + /// @brief Copy one file to another. + bool CopyFile(const Path& Dest, const Path& Src, std::string* ErrMsg); + + /// This is the OS-specific path separator: a colon on Unix or a semicolon + /// on Windows. + extern const char PathSeparator; +} + +std::ostream& operator<<(std::ostream& strm, const sys::Path& aPath); +inline std::ostream& operator<<(std::ostream& strm, + const sys::PathWithStatus& aPath) { + strm << static_cast<const sys::Path&>(aPath); + return strm; +} + +} + +#endif diff --git a/include/llvm/System/Process.h b/include/llvm/System/Process.h new file mode 100644 index 0000000..ce19eb2 --- /dev/null +++ b/include/llvm/System/Process.h @@ -0,0 +1,115 @@ +//===- llvm/System/Process.h ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the llvm::sys::Process class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_PROCESS_H +#define LLVM_SYSTEM_PROCESS_H + +#include "llvm/System/TimeValue.h" + +namespace llvm { +namespace sys { + + /// This class provides an abstraction for getting information about the + /// currently executing process. + /// @since 1.4 + /// @brief An abstraction for operating system processes. 
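Usage sketch (not part of the patch): obtaining cached status information through PathWithStatus::getFileStatus as declared above.

    #include "llvm/System/Path.h"
    #include <iostream>

    void printSize(const std::string &Name) {
      llvm::sys::PathWithStatus P(Name);
      std::string Error;
      const llvm::sys::FileStatus *FS = P.getFileStatus(false, &Error);
      if (!FS) {                                // null pointer signals failure
        std::cerr << "stat failed: " << Error << "\n";
        return;
      }
      std::cout << Name << " is " << FS->getSize() << " bytes\n";
    }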
+ class Process { + /// @name Accessors + /// @{ + public: + /// This static function will return the operating system's virtual memory + /// page size. + /// @returns The number of bytes in a virtual memory page. + /// @throws nothing + /// @brief Get the virtual memory page size + static unsigned GetPageSize(); + + /// This static function will return the total amount of memory allocated + /// by the process. This only counts the memory allocated via the malloc, + /// calloc and realloc functions and includes any "free" holes in the + /// allocated space. + /// @throws nothing + /// @brief Return process memory usage. + static size_t GetMallocUsage(); + + /// This static function will return the total memory usage of the + /// process. This includes code, data, stack and mapped pages usage. Notei + /// that the value returned here is not necessarily the Running Set Size, + /// it is the total virtual memory usage, regardless of mapped state of + /// that memory. + static size_t GetTotalMemoryUsage(); + + /// This static function will set \p user_time to the amount of CPU time + /// spent in user (non-kernel) mode and \p sys_time to the amount of CPU + /// time spent in system (kernel) mode. If the operating system does not + /// support collection of these metrics, a zero TimeValue will be for both + /// values. + static void GetTimeUsage( + TimeValue& elapsed, + ///< Returns the TimeValue::now() giving current time + TimeValue& user_time, + ///< Returns the current amount of user time for the process + TimeValue& sys_time + ///< Returns the current amount of system time for the process + ); + + /// This static function will return the process' current user id number. + /// Not all operating systems support this feature. Where it is not + /// supported, the function should return 65536 as the value. + static int GetCurrentUserId(); + + /// This static function will return the process' current group id number. + /// Not all operating systems support this feature. Where it is not + /// supported, the function should return 65536 as the value. + static int GetCurrentGroupId(); + + /// This function makes the necessary calls to the operating system to + /// prevent core files or any other kind of large memory dumps that can + /// occur when a program fails. + /// @brief Prevent core file generation. + static void PreventCoreFiles(); + + /// This function determines if the standard input is connected directly + /// to a user's input (keyboard probably), rather than coming from a file + /// or pipe. + static bool StandardInIsUserInput(); + + /// This function determines if the standard output is connected to a + /// "tty" or "console" window. That is, the output would be displayed to + /// the user rather than being put on a pipe or stored in a file. + static bool StandardOutIsDisplayed(); + + /// This function determines if the standard error is connected to a + /// "tty" or "console" window. That is, the output would be displayed to + /// the user rather than being put on a pipe or stored in a file. + static bool StandardErrIsDisplayed(); + + /// This function determines the number of columns in the window + /// if standard output is connected to a "tty" or "console" + /// window. If standard output is not connected to a tty or + /// console, or if the number of columns cannot be determined, + /// this routine returns zero. 
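Usage sketch (not part of the patch): querying the current process with the static accessors declared above.

    #include "llvm/System/Process.h"
    #include <iostream>

    void reportProcessInfo() {
      std::cout << "page size:    " << llvm::sys::Process::GetPageSize() << " bytes\n";
      std::cout << "malloc usage: " << llvm::sys::Process::GetMallocUsage() << " bytes\n";

      llvm::sys::TimeValue Elapsed(0, 0), User(0, 0), Sys(0, 0);
      llvm::sys::Process::GetTimeUsage(Elapsed, User, Sys);
      std::cout << "user time:    " << User.toString() << "\n";

      if (llvm::sys::Process::StandardOutIsDisplayed())
        std::cout << "stdout is a terminal\n";
    }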
+ static unsigned StandardOutColumns(); + + /// This function determines the number of columns in the window + /// if standard error is connected to a "tty" or "console" + /// window. If standard error is not connected to a tty or + /// console, or if the number of columns cannot be determined, + /// this routine returns zero. + static unsigned StandardErrColumns(); + /// @} + }; +} +} + +#endif diff --git a/include/llvm/System/Program.h b/include/llvm/System/Program.h new file mode 100644 index 0000000..37f5546 --- /dev/null +++ b/include/llvm/System/Program.h @@ -0,0 +1,94 @@ +//===- llvm/System/Program.h ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the llvm::sys::Program class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_PROGRAM_H +#define LLVM_SYSTEM_PROGRAM_H + +#include "llvm/System/Path.h" + +namespace llvm { +namespace sys { + + /// This class provides an abstraction for programs that are executable by the + /// operating system. It provides a platform generic way to find executable + /// programs from the path and to execute them in various ways. The sys::Path + /// class is used to specify the location of the Program. + /// @since 1.4 + /// @brief An abstraction for finding and executing programs. + class Program { + /// @name Methods + /// @{ + public: + /// This static constructor (factory) will attempt to locate a program in + /// the operating system's file system using some pre-determined set of + /// locations to search (e.g. the PATH on Unix). + /// @returns A Path object initialized to the path of the program or a + /// Path object that is empty (invalid) if the program could not be found. + /// @throws nothing + /// @brief Construct a Program by finding it by name. + static Path FindProgramByName(const std::string& name); + + /// This function executes the program using the \p arguments provided and + /// waits for the program to exit. This function will block the current + /// program until the invoked program exits. The invoked program will + /// inherit the stdin, stdout, and stderr file descriptors, the + /// environment and other configuration settings of the invoking program. + /// If Path::executable() does not return true when this function is + /// called then a std::string is thrown. + /// @returns an integer result code indicating the status of the program. + /// A zero or positive value indicates the result code of the program. A + /// negative value is the signal number on which it terminated. + /// @see FindProgrambyName + /// @brief Executes the program with the given set of \p args. + static int ExecuteAndWait( + const Path& path, ///< sys::Path object providing the path of the + ///< program to be executed. It is presumed this is the result of + ///< the FindProgramByName method. + const char** args, ///< A vector of strings that are passed to the + ///< program. The first element should be the name of the program. + ///< The list *must* be terminated by a null char* entry. + const char ** env = 0, ///< An optional vector of strings to use for + ///< the program's environment. If not provided, the current program's + ///< environment will be used. 
+ const sys::Path** redirects = 0, ///< An optional array of pointers to + ///< Paths. If the array is null, no redirection is done. The array + ///< should have a size of at least three. If the pointer in the array + ///< are not null, then the inferior process's stdin(0), stdout(1), + ///< and stderr(2) will be redirected to the corresponding Paths. + ///< When an empty Path is passed in, the corresponding file + ///< descriptor will be disconnected (ie, /dev/null'd) in a portable + ///< way. + unsigned secondsToWait = 0, ///< If non-zero, this specifies the amount + ///< of time to wait for the child process to exit. If the time + ///< expires, the child is killed and this call returns. If zero, + ///< this function will wait until the child finishes or forever if + ///< it doesn't. + unsigned memoryLimit = 0, ///< If non-zero, this specifies max. amount + ///< of memory can be allocated by process. If memory usage will be + ///< higher limit, the child is killed and this call returns. If zero + ///< - no memory limit. + std::string* ErrMsg = 0 ///< If non-zero, provides a pointer to a string + ///< instance in which error messages will be returned. If the string + ///< is non-empty upon return an error occurred while invoking the + ///< program. + ); + // These methods change the specified standard stream (stdin or stdout) to + // binary mode. They return true if an error occurred + static bool ChangeStdinToBinary(); + static bool ChangeStdoutToBinary(); + /// @} + }; +} +} + +#endif diff --git a/include/llvm/System/Signals.h b/include/llvm/System/Signals.h new file mode 100644 index 0000000..2b9d8ca --- /dev/null +++ b/include/llvm/System/Signals.h @@ -0,0 +1,51 @@ +//===- llvm/System/Signals.h - Signal Handling support ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines some helpful functions for dealing with the possibility of +// unix signals occuring while your program is running. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SYSTEM_SIGNALS_H +#define LLVM_SYSTEM_SIGNALS_H + +#include "llvm/System/Path.h" + +namespace llvm { +namespace sys { + + /// This function registers signal handlers to ensure that if a signal gets + /// delivered that the named file is removed. + /// @brief Remove a file if a fatal signal occurs. + bool RemoveFileOnSignal(const Path &Filename, std::string* ErrMsg = 0); + + /// When an error signal (such as SIBABRT or SIGSEGV) is delivered to the + /// process, print a stack trace and then exit. + /// @brief Print a stack trace if a fatal signal occurs. + void PrintStackTraceOnErrorSignal(); + + /// AddSignalHandler - Add a function to be called when an abort/kill signal + /// is delivered to the process. The handler can have a cookie passed to it + /// to identify what instance of the handler it is. + void AddSignalHandler(void (*FnPtr)(void *), void *Cookie); + + /// This function registers a function to be called when the user "interrupts" + /// the program (typically by pressing ctrl-c). When the user interrupts the + /// program, the specified interrupt function is called instead of the program + /// being killed, and the interrupt function automatically disabled. Note + /// that interrupt functions are not allowed to call any non-reentrant + /// functions. 
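Usage sketch (not part of the patch): locating a program on the system path and running it with ExecuteAndWait as documented above; the program name and arguments are made-up examples.

    #include "llvm/System/Program.h"
    #include <iostream>

    int runEcho() {
      llvm::sys::Path Prog = llvm::sys::Program::FindProgramByName("echo");
      if (Prog.isEmpty()) {                          // empty path: not found
        std::cerr << "echo not found on PATH\n";
        return -1;
      }
      const char *Args[] = { "echo", "hello", 0 };   // null-terminated argument list
      std::string ErrMsg;
      int RC = llvm::sys::Program::ExecuteAndWait(Prog, Args, 0, 0, 0, 0, &ErrMsg);
      if (!ErrMsg.empty())                           // non-empty string: invocation error
        std::cerr << "error: " << ErrMsg << "\n";
      return RC;                                     // negative result: terminating signal
    }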
An null interrupt function pointer disables the current + /// installed function. Note also that the handler may be executed on a + /// different thread on some platforms. + /// @brief Register a function to be called when ctrl-c is pressed. + void SetInterruptFunction(void (*IF)()); +} // End sys namespace +} // End llvm namespace + +#endif diff --git a/include/llvm/System/Solaris.h b/include/llvm/System/Solaris.h new file mode 100644 index 0000000..15adb74 --- /dev/null +++ b/include/llvm/System/Solaris.h @@ -0,0 +1,40 @@ +/*===- llvm/System/Solaris.h ------------------------------------*- C++ -*-===* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file contains portability fixes for Solaris hosts. + * + *===----------------------------------------------------------------------===*/ + +#ifndef LLVM_SYSTEM_SOLARIS_H +#define LLVM_SYSTEM_SOLARIS_H + +#include <sys/types.h> +#include <sys/regset.h> + +#undef CS +#undef DS +#undef ES +#undef FS +#undef GS +#undef SS +#undef EAX +#undef ECX +#undef EDX +#undef EBX +#undef ESP +#undef EBP +#undef ESI +#undef EDI +#undef EIP +#undef UESP +#undef EFL +#undef ERR +#undef TRAPNO + +#endif diff --git a/include/llvm/System/TimeValue.h b/include/llvm/System/TimeValue.h new file mode 100644 index 0000000..b9ada00 --- /dev/null +++ b/include/llvm/System/TimeValue.h @@ -0,0 +1,382 @@ +//===-- TimeValue.h - Declare OS TimeValue Concept --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file declares the operating system TimeValue concept. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/DataTypes.h" +#include <string> + +#ifndef LLVM_SYSTEM_TIMEVALUE_H +#define LLVM_SYSTEM_TIMEVALUE_H + +namespace llvm { +namespace sys { + /// This class is used where a precise fixed point in time is required. The + /// range of TimeValue spans many hundreds of billions of years both past and + /// present. The precision of TimeValue is to the nanosecond. However, the + /// actual precision of its values will be determined by the resolution of + /// the system clock. The TimeValue class is used in conjunction with several + /// other lib/System interfaces to specify the time at which a call should + /// timeout, etc. + /// @since 1.4 + /// @brief Provides an abstraction for a fixed point in time. + class TimeValue { + + /// @name Constants + /// @{ + public: + + /// A constant TimeValue representing the smallest time + /// value permissable by the class. MinTime is some point + /// in the distant past, about 300 billion years BCE. + /// @brief The smallest possible time value. + static const TimeValue MinTime; + + /// A constant TimeValue representing the largest time + /// value permissable by the class. MaxTime is some point + /// in the distant future, about 300 billion years AD. + /// @brief The largest possible time value. + static const TimeValue MaxTime; + + /// A constant TimeValue representing the base time, + /// or zero time of 00:00:00 (midnight) January 1st, 2000. + /// @brief 00:00:00 Jan 1, 2000 UTC. 
+ static const TimeValue ZeroTime; + + /// A constant TimeValue for the Posix base time which is + /// 00:00:00 (midnight) January 1st, 1970. + /// @brief 00:00:00 Jan 1, 1970 UTC. + static const TimeValue PosixZeroTime; + + /// A constant TimeValue for the Win32 base time which is + /// 00:00:00 (midnight) January 1st, 1601. + /// @brief 00:00:00 Jan 1, 1601 UTC. + static const TimeValue Win32ZeroTime; + + /// @} + /// @name Types + /// @{ + public: + typedef int64_t SecondsType; ///< Type used for representing seconds. + typedef int32_t NanoSecondsType; ///< Type used for representing nanoseconds. + + enum TimeConversions { + NANOSECONDS_PER_SECOND = 1000000000, ///< One Billion + MICROSECONDS_PER_SECOND = 1000000, ///< One Million + MILLISECONDS_PER_SECOND = 1000, ///< One Thousand + NANOSECONDS_PER_MICROSECOND = 1000, ///< One Thousand + NANOSECONDS_PER_MILLISECOND = 1000000,///< One Million + NANOSECONDS_PER_POSIX_TICK = 100, ///< Posix tick is 100 Hz (10ms) + NANOSECONDS_PER_WIN32_TICK = 100 ///< Win32 tick is 100 Hz (10ms) + }; + + /// @} + /// @name Constructors + /// @{ + public: + /// Caller provides the exact value in seconds and nanoseconds. The + /// \p nanos argument defaults to zero for convenience. + /// @brief Explicit constructor + explicit TimeValue (SecondsType seconds, NanoSecondsType nanos = 0) + : seconds_( seconds ), nanos_( nanos ) { this->normalize(); } + + /// Caller provides the exact value as a double in seconds with the + /// fractional part representing nanoseconds. + /// @brief Double Constructor. + explicit TimeValue( double new_time ) + : seconds_( 0 ) , nanos_ ( 0 ) { + SecondsType integer_part = static_cast<SecondsType>( new_time ); + seconds_ = integer_part; + nanos_ = static_cast<NanoSecondsType>( (new_time - + static_cast<double>(integer_part)) * NANOSECONDS_PER_SECOND ); + this->normalize(); + } + + /// This is a static constructor that returns a TimeValue that represents + /// the current time. + /// @brief Creates a TimeValue with the current time (UTC). + static TimeValue now(); + + /// @} + /// @name Operators + /// @{ + public: + /// Add \p that to \p this. + /// @returns this + /// @brief Incrementing assignment operator. + TimeValue& operator += (const TimeValue& that ) { + this->seconds_ += that.seconds_ ; + this->nanos_ += that.nanos_ ; + this->normalize(); + return *this; + } + + /// Subtract \p that from \p this. + /// @returns this + /// @brief Decrementing assignment operator. + TimeValue& operator -= (const TimeValue &that ) { + this->seconds_ -= that.seconds_ ; + this->nanos_ -= that.nanos_ ; + this->normalize(); + return *this; + } + + /// Determine if \p this is less than \p that. + /// @returns True iff *this < that. + /// @brief True if this < that. + int operator < (const TimeValue &that) const { return that > *this; } + + /// Determine if \p this is greather than \p that. + /// @returns True iff *this > that. + /// @brief True if this > that. + int operator > (const TimeValue &that) const { + if ( this->seconds_ > that.seconds_ ) { + return 1; + } else if ( this->seconds_ == that.seconds_ ) { + if ( this->nanos_ > that.nanos_ ) return 1; + } + return 0; + } + + /// Determine if \p this is less than or equal to \p that. + /// @returns True iff *this <= that. + /// @brief True if this <= that. + int operator <= (const TimeValue &that) const { return that >= *this; } + + /// Determine if \p this is greater than or equal to \p that. + /// @returns True iff *this >= that. + /// @brief True if this >= that. 
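Usage sketch (not part of the patch): measuring an interval with TimeValue::now() and the arithmetic operators; the subtraction operator and the msec() accessor used here are declared a little further down in this header.

    #include "llvm/System/TimeValue.h"
    #include <iostream>

    void timeSomething() {
      llvm::sys::TimeValue Start = llvm::sys::TimeValue::now();
      // ... do some work ...
      llvm::sys::TimeValue End = llvm::sys::TimeValue::now();
      llvm::sys::TimeValue Elapsed = End - Start;    // friend operator-
      std::cout << "elapsed: " << Elapsed.msec() << " ms\n";
    }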
+ int operator >= (const TimeValue &that) const { + if ( this->seconds_ > that.seconds_ ) { + return 1; + } else if ( this->seconds_ == that.seconds_ ) { + if ( this->nanos_ >= that.nanos_ ) return 1; + } + return 0; + } + + /// Determines if two TimeValue objects represent the same moment in time. + /// @brief True iff *this == that. + /// @brief True if this == that. + int operator == (const TimeValue &that) const { + return (this->seconds_ == that.seconds_) && + (this->nanos_ == that.nanos_); + } + + /// Determines if two TimeValue objects represent times that are not the + /// same. + /// @return True iff *this != that. + /// @brief True if this != that. + int operator != (const TimeValue &that) const { return !(*this == that); } + + /// Adds two TimeValue objects together. + /// @returns The sum of the two operands as a new TimeValue + /// @brief Addition operator. + friend TimeValue operator + (const TimeValue &tv1, const TimeValue &tv2); + + /// Subtracts two TimeValue objects. + /// @returns The difference of the two operands as a new TimeValue + /// @brief Subtraction operator. + friend TimeValue operator - (const TimeValue &tv1, const TimeValue &tv2); + + /// @} + /// @name Accessors + /// @{ + public: + + /// Returns only the seconds component of the TimeValue. The nanoseconds + /// portion is ignored. No rounding is performed. + /// @brief Retrieve the seconds component + SecondsType seconds() const { return seconds_; } + + /// Returns only the nanoseconds component of the TimeValue. The seconds + /// portion is ignored. + /// @brief Retrieve the nanoseconds component. + NanoSecondsType nanoseconds() const { return nanos_; } + + /// Returns only the fractional portion of the TimeValue rounded down to the + /// nearest microsecond (divide by one thousand). + /// @brief Retrieve the fractional part as microseconds; + uint32_t microseconds() const { + return nanos_ / NANOSECONDS_PER_MICROSECOND; + } + + /// Returns only the fractional portion of the TimeValue rounded down to the + /// nearest millisecond (divide by one million). + /// @brief Retrieve the fractional part as milliseconds; + uint32_t milliseconds() const { + return nanos_ / NANOSECONDS_PER_MILLISECOND; + } + + /// Returns the TimeValue as a number of microseconds. Note that the value + /// returned can overflow because the range of a uint64_t is smaller than + /// the range of a TimeValue. Nevertheless, this is useful on some operating + /// systems and is therefore provided. + /// @brief Convert to a number of microseconds (can overflow) + uint64_t usec() const { + return seconds_ * MICROSECONDS_PER_SECOND + + ( nanos_ / NANOSECONDS_PER_MICROSECOND ); + } + + /// Returns the TimeValue as a number of milliseconds. Note that the value + /// returned can overflow because the range of a uint64_t is smaller than + /// the range of a TimeValue. Nevertheless, this is useful on some operating + /// systems and is therefore provided. + /// @brief Convert to a number of milliseconds (can overflow) + uint64_t msec() const { + return seconds_ * MILLISECONDS_PER_SECOND + + ( nanos_ / NANOSECONDS_PER_MILLISECOND ); + } + + /// Converts the TimeValue into the corresponding number of "ticks" for + /// Posix, correcting for the difference in Posix zero time. 
+ /// @brief Convert to unix time (100 nanoseconds since 12:00:00a Jan 1,1970) + uint64_t toPosixTime() const { + uint64_t result = seconds_ - PosixZeroTime.seconds_; + result += nanos_ / NANOSECONDS_PER_POSIX_TICK; + return result; + } + + /// Converts the TimeValue into the corresponding number of seconds + /// since the epoch (00:00:00 Jan 1,1970). + uint64_t toEpochTime() const { + return seconds_ - PosixZeroTime.seconds_; + } + + /// Converts the TiemValue into the correspodning number of "ticks" for + /// Win32 platforms, correcting for the difference in Win32 zero time. + /// @brief Convert to windows time (seconds since 12:00:00a Jan 1, 1601) + uint64_t toWin32Time() const { + uint64_t result = seconds_ - Win32ZeroTime.seconds_; + result += nanos_ / NANOSECONDS_PER_WIN32_TICK; + return result; + } + + /// Provides the seconds and nanoseconds as results in its arguments after + /// correction for the Posix zero time. + /// @brief Convert to timespec time (ala POSIX.1b) + void getTimespecTime( uint64_t& seconds, uint32_t& nanos ) const { + seconds = seconds_ - PosixZeroTime.seconds_; + nanos = nanos_; + } + + /// Provides conversion of the TimeValue into a readable time & date. + /// @returns std::string containing the readable time value + /// @brief Convert time to a string. + std::string toString() const; + + /// @} + /// @name Mutators + /// @{ + public: + /// The seconds component of the TimeValue is set to \p sec without + /// modifying the nanoseconds part. This is useful for whole second + /// arithmetic. + /// @brief Set the seconds component. + void seconds (SecondsType sec ) { + this->seconds_ = sec; + this->normalize(); + } + + /// The nanoseconds component of the TimeValue is set to \p nanos without + /// modifying the seconds part. This is useful for basic computations + /// involving just the nanoseconds portion. Note that the TimeValue will be + /// normalized after this call so that the fractional (nanoseconds) portion + /// will have the smallest equivalent value. + /// @brief Set the nanoseconds component using a number of nanoseconds. + void nanoseconds ( NanoSecondsType nanos ) { + this->nanos_ = nanos; + this->normalize(); + } + + /// The seconds component remains unchanged. + /// @brief Set the nanoseconds component using a number of microseconds. + void microseconds ( int32_t micros ) { + this->nanos_ = micros * NANOSECONDS_PER_MICROSECOND; + this->normalize(); + } + + /// The seconds component remains unchanged. + /// @brief Set the nanoseconds component using a number of milliseconds. + void milliseconds ( int32_t millis ) { + this->nanos_ = millis * NANOSECONDS_PER_MILLISECOND; + this->normalize(); + } + + /// @brief Converts from microsecond format to TimeValue format + void usec( int64_t microseconds ) { + this->seconds_ = microseconds / MICROSECONDS_PER_SECOND; + this->nanos_ = NanoSecondsType(microseconds % MICROSECONDS_PER_SECOND) * + NANOSECONDS_PER_MICROSECOND; + this->normalize(); + } + + /// @brief Converts from millisecond format to TimeValue format + void msec( int64_t milliseconds ) { + this->seconds_ = milliseconds / MILLISECONDS_PER_SECOND; + this->nanos_ = NanoSecondsType(milliseconds % MILLISECONDS_PER_SECOND) * + NANOSECONDS_PER_MILLISECOND; + this->normalize(); + } + + /// Converts the \p seconds argument from PosixTime to the corresponding + /// TimeValue and assigns that value to \p this. 
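Usage sketch (not part of the patch): using the conversion helpers and mutators above; note that msec() as a mutator normalizes the value into seconds plus nanoseconds.

    #include "llvm/System/TimeValue.h"
    #include <iostream>

    void convertTimes() {
      llvm::sys::TimeValue T = llvm::sys::TimeValue::now();
      std::cout << "now:        " << T.toString() << "\n";
      std::cout << "unix epoch: " << T.toEpochTime() << " seconds\n";

      llvm::sys::TimeValue Interval(0, 0);
      Interval.msec(1500);                   // normalized to 1 s + 500,000,000 ns
      std::cout << "interval:   " << Interval.seconds() << " s "
                << Interval.nanoseconds() << " ns\n";
    }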
+ /// @brief Convert seconds form PosixTime to TimeValue + void fromEpochTime( SecondsType seconds ) { + seconds_ = seconds + PosixZeroTime.seconds_; + nanos_ = 0; + this->normalize(); + } + + /// Converts the \p win32Time argument from Windows FILETIME to the + /// corresponding TimeValue and assigns that value to \p this. + /// @brief Convert seconds form Windows FILETIME to TimeValue + void fromWin32Time( uint64_t win32Time ) { + this->seconds_ = win32Time / 10000000 + Win32ZeroTime.seconds_; + this->nanos_ = NanoSecondsType(win32Time % 10000000) * 100; + } + + /// @} + /// @name Implementation + /// @{ + private: + /// This causes the values to be represented so that the fractional + /// part is minimized, possibly incrementing the seconds part. + /// @brief Normalize to canonical form. + void normalize(); + + /// @} + /// @name Data + /// @{ + private: + /// Store the values as a <timeval>. + SecondsType seconds_;///< Stores the seconds part of the TimeVal + NanoSecondsType nanos_; ///< Stores the nanoseconds part of the TimeVal + /// @} + + }; + +inline TimeValue operator + (const TimeValue &tv1, const TimeValue &tv2) { + TimeValue sum (tv1.seconds_ + tv2.seconds_, tv1.nanos_ + tv2.nanos_); + sum.normalize (); + return sum; +} + +inline TimeValue operator - (const TimeValue &tv1, const TimeValue &tv2) { + TimeValue difference (tv1.seconds_ - tv2.seconds_, tv1.nanos_ - tv2.nanos_ ); + difference.normalize (); + return difference; +} + +} +} + +#endif diff --git a/include/llvm/Target/DarwinTargetAsmInfo.h b/include/llvm/Target/DarwinTargetAsmInfo.h new file mode 100644 index 0000000..6241ffe --- /dev/null +++ b/include/llvm/Target/DarwinTargetAsmInfo.h @@ -0,0 +1,50 @@ +//===---- DarwinTargetAsmInfo.h - Darwin asm properties ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines target asm properties related what form asm statements +// should take in general on Darwin-based targets +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DARWIN_TARGET_ASM_INFO_H +#define LLVM_DARWIN_TARGET_ASM_INFO_H + +#include "llvm/Target/TargetAsmInfo.h" + +namespace llvm { + class GlobalValue; + class GlobalVariable; + class Type; + class Mangler; + + struct DarwinTargetAsmInfo: public TargetAsmInfo { + const Section* TextCoalSection; + const Section* ConstTextCoalSection; + const Section* ConstDataCoalSection; + const Section* ConstDataSection; + const Section* DataCoalSection; + const Section* FourByteConstantSection; + const Section* EightByteConstantSection; + const Section* SixteenByteConstantSection; + + explicit DarwinTargetAsmInfo(const TargetMachine &TM); + virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const; + virtual std::string UniqueSectionForGlobal(const GlobalValue* GV, + SectionKind::Kind kind) const; + virtual bool emitUsedDirectiveFor(const GlobalValue *GV, + Mangler *Mang) const; + const Section* MergeableConstSection(const GlobalVariable *GV) const; + const Section* MergeableConstSection(const Type *Ty) const; + const Section* MergeableStringSection(const GlobalVariable *GV) const; + const Section* SelectSectionForMachineConst(const Type *Ty) const; + }; +} + + +#endif // LLVM_DARWIN_TARGET_ASM_INFO_H diff --git a/include/llvm/Target/ELFTargetAsmInfo.h b/include/llvm/Target/ELFTargetAsmInfo.h new file mode 100644 index 0000000..6181e59 --- /dev/null +++ b/include/llvm/Target/ELFTargetAsmInfo.h @@ -0,0 +1,45 @@ +//===---- ELFTargetAsmInfo.h - ELF asm properties ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines target asm properties related what form asm statements +// should take in general on ELF-based targets +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ELF_TARGET_ASM_INFO_H +#define LLVM_ELF_TARGET_ASM_INFO_H + +#include "llvm/Target/TargetAsmInfo.h" + +namespace llvm { + class GlobalValue; + class GlobalVariable; + class Type; + + struct ELFTargetAsmInfo: public TargetAsmInfo { + explicit ELFTargetAsmInfo(const TargetMachine &TM); + + SectionKind::Kind SectionKindForGlobal(const GlobalValue *GV) const; + virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const; + virtual std::string printSectionFlags(unsigned flags) const; + const Section* MergeableConstSection(const GlobalVariable *GV) const; + inline const Section* MergeableConstSection(const Type *Ty) const; + const Section* MergeableStringSection(const GlobalVariable *GV) const; + virtual const Section* + SelectSectionForMachineConst(const Type *Ty) const; + + const Section* DataRelSection; + const Section* DataRelLocalSection; + const Section* DataRelROSection; + const Section* DataRelROLocalSection; + }; +} + + +#endif // LLVM_ELF_TARGET_ASM_INFO_H diff --git a/include/llvm/Target/SubtargetFeature.h b/include/llvm/Target/SubtargetFeature.h new file mode 100644 index 0000000..5cfdc02 --- /dev/null +++ b/include/llvm/Target/SubtargetFeature.h @@ -0,0 +1,114 @@ +//===-- llvm/Target/SubtargetFeature.h - CPU characteristics ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines and manages user or tool specified CPU characteristics. +// The intent is to be able to package specific features that should or should +// not be used on a specific target processor. A tool, such as llc, could, as +// as example, gather chip info from the command line, a long with features +// that should be used on that chip. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_SUBTARGETFEATURE_H +#define LLVM_TARGET_SUBTARGETFEATURE_H + +#include <string> +#include <vector> +#include <iosfwd> +#include <cstring> +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// SubtargetFeatureKV - Used to provide key value pairs for feature and +/// CPU bit flags. +// +struct SubtargetFeatureKV { + const char *Key; // K-V key string + const char *Desc; // Help descriptor + uint32_t Value; // K-V integer value + uint32_t Implies; // K-V bit mask + + // Compare routine for std binary search + bool operator<(const SubtargetFeatureKV &S) const { + return strcmp(Key, S.Key) < 0; + } +}; + +//===----------------------------------------------------------------------===// +/// +/// SubtargetInfoKV - Used to provide key value pairs for CPU and arbitrary +/// pointers. 
+// +struct SubtargetInfoKV { + const char *Key; // K-V key string + void *Value; // K-V pointer value + + // Compare routine for std binary search + bool operator<(const SubtargetInfoKV &S) const { + return strcmp(Key, S.Key) < 0; + } +}; + +//===----------------------------------------------------------------------===// +/// +/// SubtargetFeatures - Manages the enabling and disabling of subtarget +/// specific features. Features are encoded as a string of the form +/// "cpu,+attr1,+attr2,-attr3,...,+attrN" +/// A comma separates each feature from the next (all lowercase.) +/// The first feature is always the CPU subtype (eg. pentiumm). If the CPU +/// value is "generic" then the CPU subtype should be generic for the target. +/// Each of the remaining features is prefixed with + or - indicating whether +/// that feature should be enabled or disabled contrary to the cpu +/// specification. +/// + +class SubtargetFeatures { + std::vector<std::string> Features; // Subtarget features as a vector +public: + explicit SubtargetFeatures(const std::string &Initial = std::string()); + + /// Features string accessors. + std::string getString() const; + void setString(const std::string &Initial); + + /// Set the CPU string. Replaces previous setting. Setting to "" clears CPU. + void setCPU(const std::string &String); + + /// Setting CPU string only if no string is set. + void setCPUIfNone(const std::string &String); + + /// Returns current CPU string. + const std::string & getCPU() const; + + /// Adding Features. + void AddFeature(const std::string &String, bool IsEnabled = true); + + /// Get feature bits. + uint32_t getBits(const SubtargetFeatureKV *CPUTable, + size_t CPUTableSize, + const SubtargetFeatureKV *FeatureTable, + size_t FeatureTableSize); + + /// Get info pointer + void *getInfo(const SubtargetInfoKV *Table, size_t TableSize); + + /// Print feature string. + void print(std::ostream &OS) const; + void print(std::ostream *OS) const { if (OS) print(*OS); } + + // Dump feature info. + void dump() const; +}; + +} // End namespace llvm + +#endif diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td new file mode 100644 index 0000000..3f1cdd2 --- /dev/null +++ b/include/llvm/Target/Target.td @@ -0,0 +1,507 @@ +//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the target-independent interfaces which should be +// implemented by each target which is using a TableGen based code generator. +// +//===----------------------------------------------------------------------===// + +// Include all information about LLVM intrinsics. +include "llvm/Intrinsics.td" + +//===----------------------------------------------------------------------===// +// Register file description - These classes are used to fill in the target +// description classes. + +class RegisterClass; // Forward def + +// Register - You should define one instance of this class for each register +// in the target machine. String n will become the "name" of the register. +class Register<string n> { + string Namespace = ""; + string AsmName = n; + + // SpillSize - If this value is set to a non-zero value, it is the size in + // bits of the spill slot required to hold this register. 
If this value is + // set to zero, the information is inferred from any register classes the + // register belongs to. + int SpillSize = 0; + + // SpillAlignment - This value is used to specify the alignment required for + // spilling the register. Like SpillSize, this should only be explicitly + // specified if the register is not in a register class. + int SpillAlignment = 0; + + // Aliases - A list of registers that this register overlaps with. A read or + // modification of this register can potentially read or modify the aliased + // registers. + list<Register> Aliases = []; + + // SubRegs - A list of registers that are parts of this register. Note these + // are "immediate" sub-registers and the registers within the list do not + // themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX], + // not [AX, AH, AL]. + list<Register> SubRegs = []; + + // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register. + // These values can be determined by locating the <target>.h file in the + // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The + // order of these names correspond to the enumeration used by gcc. A value of + // -1 indicates that the gcc number is undefined and -2 that register number + // is invalid for this mode/flavour. + list<int> DwarfNumbers = []; +} + +// RegisterWithSubRegs - This can be used to define instances of Register which +// need to specify sub-registers. +// List "subregs" specifies which registers are sub-registers to this one. This +// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc. +// This allows the code generator to be careful not to put two values with +// overlapping live ranges into registers which alias. +class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> { + let SubRegs = subregs; +} + +// SubRegSet - This can be used to define a specific mapping of registers to +// indices, for use as named subregs of a particular physical register. Each +// register in 'subregs' becomes an addressable subregister at index 'n' of the +// corresponding register in 'regs'. +class SubRegSet<int n, list<Register> regs, list<Register> subregs> { + int index = n; + + list<Register> From = regs; + list<Register> To = subregs; +} + +// RegisterClass - Now that all of the registers are defined, and aliases +// between registers are defined, specify which registers belong to which +// register classes. This also defines the default allocation order of +// registers by register allocators. +// +class RegisterClass<string namespace, list<ValueType> regTypes, int alignment, + list<Register> regList> { + string Namespace = namespace; + + // RegType - Specify the list ValueType of the registers in this register + // class. Note that all registers in a register class must have the same + // ValueTypes. This is a list because some targets permit storing different + // types in same register, for example vector values with 128-bit total size, + // but different count/size of items, like SSE on x86. + // + list<ValueType> RegTypes = regTypes; + + // Size - Specify the spill size in bits of the registers. A default value of + // zero lets tablgen pick an appropriate size. + int Size = 0; + + // Alignment - Specify the alignment required of the registers when they are + // stored or loaded to memory. + // + int Alignment = alignment; + + // CopyCost - This value is used to specify the cost of copying a value + // between two registers in this register class. 
The default value is one + // meaning it takes a single instruction to perform the copying. A negative + // value means copying is extremely expensive or impossible. + int CopyCost = 1; + + // MemberList - Specify which registers are in this class. If the + // allocation_order_* method are not specified, this also defines the order of + // allocation used by the register allocator. + // + list<Register> MemberList = regList; + + // SubClassList - Specify which register classes correspond to subregisters + // of this class. The order should be by subregister set index. + list<RegisterClass> SubRegClassList = []; + + // MethodProtos/MethodBodies - These members can be used to insert arbitrary + // code into a generated register class. The normal usage of this is to + // overload virtual methods. + code MethodProtos = [{}]; + code MethodBodies = [{}]; +} + + +//===----------------------------------------------------------------------===// +// DwarfRegNum - This class provides a mapping of the llvm register enumeration +// to the register numbering used by gcc and gdb. These values are used by a +// debug information writer (ex. DwarfWriter) to describe where values may be +// located during execution. +class DwarfRegNum<list<int> Numbers> { + // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register. + // These values can be determined by locating the <target>.h file in the + // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The + // order of these names correspond to the enumeration used by gcc. A value of + // -1 indicates that the gcc number is undefined and -2 that register number is + // invalid for this mode/flavour. + list<int> DwarfNumbers = Numbers; +} + +//===----------------------------------------------------------------------===// +// Pull in the common support for scheduling +// +include "llvm/Target/TargetSchedule.td" + +class Predicate; // Forward def + +//===----------------------------------------------------------------------===// +// Instruction set description - These classes correspond to the C++ classes in +// the Target/TargetInstrInfo.h file. +// +class Instruction { + string Namespace = ""; + + dag OutOperandList; // An dag containing the MI def operand list. + dag InOperandList; // An dag containing the MI use operand list. + string AsmString = ""; // The .s format to print the instruction with. + + // Pattern - Set to the DAG pattern for this instruction, if we know of one, + // otherwise, uninitialized. + list<dag> Pattern; + + // The follow state will eventually be inferred automatically from the + // instruction pattern. + + list<Register> Uses = []; // Default to using no non-operand registers + list<Register> Defs = []; // Default to modifying no non-operand registers + + // Predicates - List of predicates which will be turned into isel matching + // code. + list<Predicate> Predicates = []; + + // Code size. + int CodeSize = 0; + + // Added complexity passed onto matching pattern. + int AddedComplexity = 0; + + // These bits capture information about the high-level semantics of the + // instruction. + bit isReturn = 0; // Is this instruction a return instruction? + bit isBranch = 0; // Is this instruction a branch instruction? + bit isIndirectBranch = 0; // Is this instruction an indirect branch? + bit isBarrier = 0; // Can control flow fall through this instruction? + bit isCall = 0; // Is this instruction a call instruction? + bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand? 
+ bit mayLoad = 0; // Is it possible for this inst to read memory? + bit mayStore = 0; // Is it possible for this inst to write memory? + bit isTwoAddress = 0; // Is this a two address instruction? + bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote? + bit isCommutable = 0; // Is this 3 operand instruction commutable? + bit isTerminator = 0; // Is this part of the terminator for a basic block? + bit isReMaterializable = 0; // Is this instruction re-materializable? + bit isPredicable = 0; // Is this instruction predicable? + bit hasDelaySlot = 0; // Does this instruction have an delay slot? + bit usesCustomDAGSchedInserter = 0; // Pseudo instr needing special help. + bit hasCtrlDep = 0; // Does this instruction r/w ctrl-flow chains? + bit isNotDuplicable = 0; // Is it unsafe to duplicate this instruction? + bit isAsCheapAsAMove = 0; // As cheap (or cheaper) than a move instruction. + + // Side effect flags - When set, the flags have these meanings: + // + // hasSideEffects - The instruction has side effects that are not + // captured by any operands of the instruction or other flags. + // + // mayHaveSideEffects - Some instances of the instruction can have side + // effects. The virtual method "isReallySideEffectFree" is called to + // determine this. Load instructions are an example of where this is + // useful. In general, loads always have side effects. However, loads from + // constant pools don't. Individual back ends make this determination. + // + // neverHasSideEffects - Set on an instruction with no pattern if it has no + // side effects. + bit hasSideEffects = 0; + bit mayHaveSideEffects = 0; + bit neverHasSideEffects = 0; + + InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling. + + string Constraints = ""; // OperandConstraint, e.g. $src = $dst. + + /// DisableEncoding - List of operand names (e.g. "$op1,$op2") that should not + /// be encoded into the output machineinstr. + string DisableEncoding = ""; +} + +/// Predicates - These are extra conditionals which are turned into instruction +/// selector matching code. Currently each predicate is just a string. +class Predicate<string cond> { + string CondString = cond; +} + +/// NoHonorSignDependentRounding - This predicate is true if support for +/// sign-dependent-rounding is not enabled. +def NoHonorSignDependentRounding + : Predicate<"!HonorSignDependentRoundingFPMath()">; + +class Requires<list<Predicate> preds> { + list<Predicate> Predicates = preds; +} + +/// ops definition - This is just a simple marker used to identify the operands +/// list for an instruction. outs and ins are identical both syntatically and +/// semantically, they are used to define def operands and use operands to +/// improve readibility. This should be used like this: +/// (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar. +def ops; +def outs; +def ins; + +/// variable_ops definition - Mark this instruction as taking a variable number +/// of operands. +def variable_ops; + +/// ptr_rc definition - Mark this operand as being a pointer value whose +/// register class is resolved dynamically via a callback to TargetInstrInfo. +/// FIXME: We should probably change this to a class which contain a list of +/// flags. But currently we have but one flag. +def ptr_rc; + +/// unknown definition - Mark this operand as being of unknown type, causing +/// it to be resolved by inference in the context it is used. 
+def unknown; + +/// Operand Types - These provide the built-in operand types that may be used +/// by a target. Targets can optionally provide their own operand types as +/// needed, though this should not be needed for RISC targets. +class Operand<ValueType ty> { + ValueType Type = ty; + string PrintMethod = "printOperand"; + dag MIOperandInfo = (ops); +} + +def i1imm : Operand<i1>; +def i8imm : Operand<i8>; +def i16imm : Operand<i16>; +def i32imm : Operand<i32>; +def i64imm : Operand<i64>; + +def f32imm : Operand<f32>; +def f64imm : Operand<f64>; + +/// zero_reg definition - Special node to stand for the zero register. +/// +def zero_reg; + +/// PredicateOperand - This can be used to define a predicate operand for an +/// instruction. OpTypes specifies the MIOperandInfo for the operand, and +/// AlwaysVal specifies the value of this predicate when set to "always +/// execute". +class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal> + : Operand<ty> { + let MIOperandInfo = OpTypes; + dag DefaultOps = AlwaysVal; +} + +/// OptionalDefOperand - This is used to define a optional definition operand +/// for an instruction. DefaultOps is the register the operand represents if none +/// is supplied, e.g. zero_reg. +class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops> + : Operand<ty> { + let MIOperandInfo = OpTypes; + dag DefaultOps = defaultops; +} + + +// InstrInfo - This class should only be instantiated once to provide parameters +// which are global to the the target machine. +// +class InstrInfo { + // If the target wants to associate some target-specific information with each + // instruction, it should provide these two lists to indicate how to assemble + // the target specific information into the 32 bits available. + // + list<string> TSFlagsFields = []; + list<int> TSFlagsShifts = []; + + // Target can specify its instructions in either big or little-endian formats. + // For instance, while both Sparc and PowerPC are big-endian platforms, the + // Sparc manual specifies its instructions in the format [31..0] (big), while + // PowerPC specifies them using the format [0..31] (little). + bit isLittleEndianEncoding = 0; +} + +// Standard Instructions. 
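+// Editor's illustration (not part of the original file): before the standard
+// instructions below, here is a minimal sketch of how a backend might use the
+// Instruction class defined above in its own .td file.  The register class
+// GR32 and the target name "MyTarget" are hypothetical, and the 'add' node
+// comes from TargetSelectionDAG.td, which Target.td includes at the end:
+//
+//   def ADDrr : Instruction {
+//     let Namespace      = "MyTarget";
+//     let OutOperandList = (outs GR32:$dst);
+//     let InOperandList  = (ins GR32:$src1, GR32:$src2);
+//     let AsmString      = "add $dst, $src1, $src2";
+//     let Pattern        = [(set GR32:$dst, (add GR32:$src1, GR32:$src2))];
+//     let isCommutable   = 1;
+//   }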
+def PHI : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops variable_ops); + let AsmString = "PHINODE"; + let Namespace = "TargetInstrInfo"; +} +def INLINEASM : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops variable_ops); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; +} +def DBG_LABEL : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops i32imm:$id); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def EH_LABEL : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops i32imm:$id); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def GC_LABEL : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops i32imm:$id); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def DECLARE : Instruction { + let OutOperandList = (ops); + let InOperandList = (ops variable_ops); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let hasCtrlDep = 1; +} +def EXTRACT_SUBREG : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$supersrc, i32imm:$subidx); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; +} +def INSERT_SUBREG : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$supersrc, unknown:$subsrc, i32imm:$subidx); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; + let Constraints = "$supersrc = $dst"; +} +def IMPLICIT_DEF : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; + let isReMaterializable = 1; + let isAsCheapAsAMove = 1; +} +def SUBREG_TO_REG : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$implsrc, unknown:$subsrc, i32imm:$subidx); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; +} +def COPY_TO_REGCLASS : Instruction { + let OutOperandList = (ops unknown:$dst); + let InOperandList = (ops unknown:$src, i32imm:$regclass); + let AsmString = ""; + let Namespace = "TargetInstrInfo"; + let neverHasSideEffects = 1; + let isAsCheapAsAMove = 1; +} + +//===----------------------------------------------------------------------===// +// AsmWriter - This class can be implemented by targets that need to customize +// the format of the .s file writer. +// +// Subtargets can have multiple different asmwriters (e.g. AT&T vs Intel syntax +// on X86 for example). +// +class AsmWriter { + // AsmWriterClassName - This specifies the suffix to use for the asmwriter + // class. Generated AsmWriter classes are always prefixed with the target + // name. + string AsmWriterClassName = "AsmPrinter"; + + // InstFormatName - AsmWriters can specify the name of the format string to + // print instructions with. + string InstFormatName = "AsmString"; + + // Variant - AsmWriters can be of multiple different variants. Variants are + // used to support targets that need to emit assembly code in ways that are + // mostly the same for different targets, but have minor differences in + // syntax. If the asmstring contains {|} characters in them, this integer + // will specify which alternative to use. For example "{x|y|z}" with Variant + // == 1, will expand to "y". 
+ int Variant = 0; +} +def DefaultAsmWriter : AsmWriter; + + +//===----------------------------------------------------------------------===// +// Target - This class contains the "global" target information +// +class Target { + // InstructionSet - Instruction set description for this target. + InstrInfo InstructionSet; + + // AssemblyWriters - The AsmWriter instances available for this target. + list<AsmWriter> AssemblyWriters = [DefaultAsmWriter]; +} + +//===----------------------------------------------------------------------===// +// SubtargetFeature - A characteristic of the chip set. +// +class SubtargetFeature<string n, string a, string v, string d, + list<SubtargetFeature> i = []> { + // Name - Feature name. Used by command line (-mattr=) to determine the + // appropriate target chip. + // + string Name = n; + + // Attribute - Attribute to be set by feature. + // + string Attribute = a; + + // Value - Value the attribute to be set to by feature. + // + string Value = v; + + // Desc - Feature description. Used by command line (-mattr=) to display help + // information. + // + string Desc = d; + + // Implies - Features that this feature implies are present. If one of those + // features isn't set, then this one shouldn't be set either. + // + list<SubtargetFeature> Implies = i; +} + +//===----------------------------------------------------------------------===// +// Processor chip sets - These values represent each of the chip sets supported +// by the scheduler. Each Processor definition requires corresponding +// instruction itineraries. +// +class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> { + // Name - Chip set name. Used by command line (-mcpu=) to determine the + // appropriate target chip. + // + string Name = n; + + // ProcItin - The scheduling information for the target processor. + // + ProcessorItineraries ProcItin = pi; + + // Features - list of + list<SubtargetFeature> Features = f; +} + +//===----------------------------------------------------------------------===// +// Pull in the common support for calling conventions. +// +include "llvm/Target/TargetCallingConv.td" + +//===----------------------------------------------------------------------===// +// Pull in the common support for DAG isel generation. +// +include "llvm/Target/TargetSelectionDAG.td" diff --git a/include/llvm/Target/TargetAsmInfo.h b/include/llvm/Target/TargetAsmInfo.h new file mode 100644 index 0000000..f223f47 --- /dev/null +++ b/include/llvm/Target/TargetAsmInfo.h @@ -0,0 +1,932 @@ +//===-- llvm/Target/TargetAsmInfo.h - Asm info ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a class to be used as the basis for target specific +// asm writers. This class primarily takes care of global printing constants, +// which are used in very similar ways across all targets. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_ASM_INFO_H +#define LLVM_TARGET_ASM_INFO_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/Support/DataTypes.h" +#include <string> + +namespace llvm { + // DWARF encoding query type + namespace DwarfEncoding { + enum Target { + Data = 0, + CodeLabels = 1, + Functions = 2 + }; + } + + namespace SectionKind { + enum Kind { + Unknown = 0, ///< Custom section + Text, ///< Text section + Data, ///< Data section + DataRel, ///< Contains data that has relocations + DataRelLocal, ///< Contains data that has only local relocations + BSS, ///< BSS section + ROData, ///< Readonly data section + DataRelRO, ///< Contains data that is otherwise readonly + DataRelROLocal, ///< Contains r/o data with only local relocations + RODataMergeStr, ///< Readonly data section (mergeable strings) + RODataMergeConst, ///< Readonly data section (mergeable constants) + SmallData, ///< Small data section + SmallBSS, ///< Small bss section + SmallROData, ///< Small readonly section + ThreadData, ///< Initialized TLS data objects + ThreadBSS ///< Uninitialized TLS data objects + }; + + static inline bool isReadOnly(Kind K) { + return (K == SectionKind::ROData || + K == SectionKind::RODataMergeConst || + K == SectionKind::RODataMergeStr || + K == SectionKind::SmallROData); + } + + static inline bool isBSS(Kind K) { + return (K == SectionKind::BSS || + K == SectionKind::SmallBSS); + } + } + + namespace SectionFlags { + const unsigned Invalid = -1U; + const unsigned None = 0; + const unsigned Code = 1 << 0; ///< Section contains code + const unsigned Writeable = 1 << 1; ///< Section is writeable + const unsigned BSS = 1 << 2; ///< Section contains only zeroes + const unsigned Mergeable = 1 << 3; ///< Section contains mergeable data + const unsigned Strings = 1 << 4; ///< Section contains C-type strings + const unsigned TLS = 1 << 5; ///< Section contains thread-local data + const unsigned Debug = 1 << 6; ///< Section contains debug data + const unsigned Linkonce = 1 << 7; ///< Section is linkonce + const unsigned Small = 1 << 8; ///< Section is small + const unsigned TypeFlags = 0xFF; + // Some gap for future flags + const unsigned Named = 1 << 23; ///< Section is named + const unsigned EntitySize = 0xFF << 24; ///< Entity size for mergeable stuff + + static inline unsigned getEntitySize(unsigned Flags) { + return (Flags >> 24) & 0xFF; + } + + static inline unsigned setEntitySize(unsigned Flags, unsigned Size) { + return ((Flags & ~EntitySize) | ((Size & 0xFF) << 24)); + } + + struct KeyInfo { + static inline unsigned getEmptyKey() { return Invalid; } + static inline unsigned getTombstoneKey() { return Invalid - 1; } + static unsigned getHashValue(const unsigned &Key) { return Key; } + static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; } + static bool isPod() { return true; } + }; + + typedef DenseMap<unsigned, std::string, KeyInfo> FlagsStringsMapType; + } + + class TargetMachine; + class CallInst; + class GlobalValue; + class Type; + class Mangler; + + class Section { + friend class TargetAsmInfo; + friend class StringMapEntry<Section>; + friend class StringMap<Section>; + + std::string Name; + unsigned Flags; + explicit Section(unsigned F = SectionFlags::Invalid):Flags(F) { } + + public: + + bool isNamed() const { return Flags & SectionFlags::Named; } + unsigned getEntitySize() const { return (Flags >> 24) & 0xFF; } + + const std::string& getName() const { 
return Name; } + unsigned getFlags() const { return Flags; } + }; + + /// TargetAsmInfo - This class is intended to be used as a base class for asm + /// properties and features specific to the target. + class TargetAsmInfo { + private: + mutable StringMap<Section> Sections; + mutable SectionFlags::FlagsStringsMapType FlagsStrings; + void fillDefaultValues(); + protected: + /// TM - The current TargetMachine. + const TargetMachine &TM; + + //===------------------------------------------------------------------===// + // Properties to be set by the target writer, used to configure asm printer. + // + + /// TextSection - Section directive for standard text. + /// + const Section *TextSection; // Defaults to ".text". + + /// DataSection - Section directive for standard data. + /// + const Section *DataSection; // Defaults to ".data". + + /// BSSSection - Section directive for uninitialized data. Null if this + /// target doesn't support a BSS section. + /// + const char *BSSSection; // Default to ".bss". + const Section *BSSSection_; + + /// ReadOnlySection - This is the directive that is emitted to switch to a + /// read-only section for constant data (e.g. data declared const, + /// jump tables). + const Section *ReadOnlySection; // Defaults to NULL + + /// SmallDataSection - This is the directive that is emitted to switch to a + /// small data section. + /// + const Section *SmallDataSection; // Defaults to NULL + + /// SmallBSSSection - This is the directive that is emitted to switch to a + /// small bss section. + /// + const Section *SmallBSSSection; // Defaults to NULL + + /// SmallRODataSection - This is the directive that is emitted to switch to + /// a small read-only data section. + /// + const Section *SmallRODataSection; // Defaults to NULL + + /// TLSDataSection - Section directive for Thread Local data. + /// + const Section *TLSDataSection; // Defaults to ".tdata". + + /// TLSBSSSection - Section directive for Thread Local uninitialized data. + /// Null if this target doesn't support a BSS section. + /// + const Section *TLSBSSSection; // Defaults to ".tbss". + + /// ZeroFillDirective - Directive for emitting a global to the ZeroFill + /// section on this target. Null if this target doesn't support zerofill. + const char *ZeroFillDirective; // Default is null. + + /// NonexecutableStackDirective - Directive for declaring to the + /// linker and beyond that the emitted code does not require stack + /// memory to be executable. + const char *NonexecutableStackDirective; // Default is null. + + /// NeedsSet - True if target asm treats expressions in data directives + /// as linktime-relocatable. For assembly-time computation, we need to + /// use a .set. Thus: + /// .set w, x-y + /// .long w + /// is computed at assembly time, while + /// .long x-y + /// is relocated if the relative locations of x and y change at linktime. + /// We want both these things in different places. + bool NeedsSet; // Defaults to false. + + /// MaxInstLength - This is the maximum possible length of an instruction, + /// which is needed to compute the size of an inline asm. + unsigned MaxInstLength; // Defaults to 4. + + /// PCSymbol - The symbol used to represent the current PC. Used in PC + /// relative expressions. + const char *PCSymbol; // Defaults to "$". + + /// SeparatorChar - This character, if specified, is used to separate + /// instructions from each other when on the same line. This is used to + /// measure inline asm instructions. 
+ char SeparatorChar; // Defaults to ';' + + /// CommentString - This indicates the comment character used by the + /// assembler. + const char *CommentString; // Defaults to "#" + + /// GlobalPrefix - If this is set to a non-empty string, it is prepended + /// onto all global symbols. This is often used for "_" or ".". + const char *GlobalPrefix; // Defaults to "" + + /// PrivateGlobalPrefix - This prefix is used for globals like constant + /// pool entries that are completely private to the .s file and should not + /// have names in the .o file. This is often "." or "L". + const char *PrivateGlobalPrefix; // Defaults to "." + + /// LessPrivateGlobalPrefix - This prefix is used for symbols that should + /// be passed through the assembler but be removed by the linker. This + /// is "l" on Darwin, currently used for some ObjC metadata. + const char *LessPrivateGlobalPrefix; // Defaults to "" + + /// JumpTableSpecialLabelPrefix - If not null, a extra (dead) label is + /// emitted before jump tables with the specified prefix. + const char *JumpTableSpecialLabelPrefix; // Default to null. + + /// GlobalVarAddrPrefix/Suffix - If these are nonempty, these strings + /// will enclose any GlobalVariable (that isn't a function) + /// + const char *GlobalVarAddrPrefix; // Defaults to "" + const char *GlobalVarAddrSuffix; // Defaults to "" + + /// FunctionAddrPrefix/Suffix - If these are nonempty, these strings + /// will enclose any GlobalVariable that points to a function. + /// For example, this is used by the IA64 backend to materialize + /// function descriptors, by decorating the ".data8" object with the + /// @verbatim @fptr( ) @endverbatim + /// link-relocation operator. + /// + const char *FunctionAddrPrefix; // Defaults to "" + const char *FunctionAddrSuffix; // Defaults to "" + + /// PersonalityPrefix/Suffix - If these are nonempty, these strings will + /// enclose any personality function in the common frame section. + /// + const char *PersonalityPrefix; // Defaults to "" + const char *PersonalitySuffix; // Defaults to "" + + /// NeedsIndirectEncoding - If set, we need to set the indirect encoding bit + /// for EH in Dwarf. + /// + bool NeedsIndirectEncoding; // Defaults to false + + /// InlineAsmStart/End - If these are nonempty, they contain a directive to + /// emit before and after an inline assembly statement. + const char *InlineAsmStart; // Defaults to "#APP\n" + const char *InlineAsmEnd; // Defaults to "#NO_APP\n" + + /// AssemblerDialect - Which dialect of an assembler variant to use. + unsigned AssemblerDialect; // Defaults to 0 + + /// StringConstantPrefix - Prefix for FEs to use when generating unnamed + /// constant strings. These names get run through the Mangler later; if + /// you want the Mangler not to add the GlobalPrefix as well, + /// use '\1' as the first character. + const char *StringConstantPrefix; // Defaults to ".str" + + //===--- Data Emission Directives -------------------------------------===// + + /// ZeroDirective - this should be set to the directive used to get some + /// number of zero bytes emitted to the current section. Common cases are + /// "\t.zero\t" and "\t.space\t". If this is set to null, the + /// Data*bitsDirective's will be used to emit zero bytes. + const char *ZeroDirective; // Defaults to "\t.zero\t" + const char *ZeroDirectiveSuffix; // Defaults to "" + + /// AsciiDirective - This directive allows emission of an ascii string with + /// the standard C escape characters embedded into it. 
+ const char *AsciiDirective; // Defaults to "\t.ascii\t" + + /// AscizDirective - If not null, this allows for special handling of + /// zero terminated strings on this target. This is commonly supported as + /// ".asciz". If a target doesn't support this, it can be set to null. + const char *AscizDirective; // Defaults to "\t.asciz\t" + + /// DataDirectives - These directives are used to output some unit of + /// integer data to the current section. If a data directive is set to + /// null, smaller data directives will be used to emit the large sizes. + const char *Data8bitsDirective; // Defaults to "\t.byte\t" + const char *Data16bitsDirective; // Defaults to "\t.short\t" + const char *Data32bitsDirective; // Defaults to "\t.long\t" + const char *Data64bitsDirective; // Defaults to "\t.quad\t" + + /// getASDirective - Targets can override it to provide different data + /// directives for various sizes and non-default address spaces. + virtual const char *getASDirective(unsigned size, + unsigned AS) const { + assert (AS > 0 + && "Dont know the directives for default addr space"); + return NULL; + } + + //===--- Alignment Information ----------------------------------------===// + + /// AlignDirective - The directive used to emit round up to an alignment + /// boundary. + /// + const char *AlignDirective; // Defaults to "\t.align\t" + + /// AlignmentIsInBytes - If this is true (the default) then the asmprinter + /// emits ".align N" directives, where N is the number of bytes to align to. + /// Otherwise, it emits ".align log2(N)", e.g. 3 to align to an 8 byte + /// boundary. + bool AlignmentIsInBytes; // Defaults to true + + /// TextAlignFillValue - If non-zero, this is used to fill the executable + /// space created as the result of a alignment directive. + unsigned TextAlignFillValue; + + //===--- Section Switching Directives ---------------------------------===// + + /// SwitchToSectionDirective - This is the directive used when we want to + /// emit a global to an arbitrary section. The section name is emited after + /// this. + const char *SwitchToSectionDirective; // Defaults to "\t.section\t" + + /// TextSectionStartSuffix - This is printed after each start of section + /// directive for text sections. + const char *TextSectionStartSuffix; // Defaults to "". + + /// DataSectionStartSuffix - This is printed after each start of section + /// directive for data sections. + const char *DataSectionStartSuffix; // Defaults to "". + + /// SectionEndDirectiveSuffix - If non-null, the asm printer will close each + /// section with the section name and this suffix printed. + const char *SectionEndDirectiveSuffix;// Defaults to null. + + /// ConstantPoolSection - This is the section that we SwitchToSection right + /// before emitting the constant pool for a function. + const char *ConstantPoolSection; // Defaults to "\t.section .rodata" + + /// JumpTableDataSection - This is the section that we SwitchToSection right + /// before emitting the jump tables for a function when the relocation model + /// is not PIC. + const char *JumpTableDataSection; // Defaults to "\t.section .rodata" + + /// JumpTableDirective - if non-null, the directive to emit before a jump + /// table. + const char *JumpTableDirective; + + /// CStringSection - If not null, this allows for special handling of + /// cstring constants (null terminated string that does not contain any + /// other null bytes) on this target. This is commonly supported as + /// ".cstring". 
+ const char *CStringSection; // Defaults to NULL + const Section *CStringSection_; + + /// StaticCtorsSection - This is the directive that is emitted to switch to + /// a section to emit the static constructor list. + /// Defaults to "\t.section .ctors,\"aw\",@progbits". + const char *StaticCtorsSection; + + /// StaticDtorsSection - This is the directive that is emitted to switch to + /// a section to emit the static destructor list. + /// Defaults to "\t.section .dtors,\"aw\",@progbits". + const char *StaticDtorsSection; + + //===--- Global Variable Emission Directives --------------------------===// + + /// GlobalDirective - This is the directive used to declare a global entity. + /// + const char *GlobalDirective; // Defaults to NULL. + + /// ExternDirective - This is the directive used to declare external + /// globals. + /// + const char *ExternDirective; // Defaults to NULL. + + /// SetDirective - This is the name of a directive that can be used to tell + /// the assembler to set the value of a variable to some expression. + const char *SetDirective; // Defaults to null. + + /// LCOMMDirective - This is the name of a directive (if supported) that can + /// be used to efficiently declare a local (internal) block of zero + /// initialized data in the .bss/.data section. The syntax expected is: + /// @verbatim <LCOMMDirective> SYMBOLNAME LENGTHINBYTES, ALIGNMENT + /// @endverbatim + const char *LCOMMDirective; // Defaults to null. + + const char *COMMDirective; // Defaults to "\t.comm\t". + + /// COMMDirectiveTakesAlignment - True if COMMDirective take a third + /// argument that specifies the alignment of the declaration. + bool COMMDirectiveTakesAlignment; // Defaults to true. + + /// HasDotTypeDotSizeDirective - True if the target has .type and .size + /// directives, this is true for most ELF targets. + bool HasDotTypeDotSizeDirective; // Defaults to true. + + /// HasSingleParameterDotFile - True if the target has a single parameter + /// .file directive, this is true for ELF targets. + bool HasSingleParameterDotFile; // Defaults to true. + + /// UsedDirective - This directive, if non-null, is used to declare a global + /// as being used somehow that the assembler can't see. This prevents dead + /// code elimination on some targets. + const char *UsedDirective; // Defaults to null. + + /// WeakRefDirective - This directive, if non-null, is used to declare a + /// global as being a weak undefined symbol. + const char *WeakRefDirective; // Defaults to null. + + /// WeakDefDirective - This directive, if non-null, is used to declare a + /// global as being a weak defined symbol. + const char *WeakDefDirective; // Defaults to null. + + /// HiddenDirective - This directive, if non-null, is used to declare a + /// global or function as having hidden visibility. + const char *HiddenDirective; // Defaults to "\t.hidden\t". + + /// ProtectedDirective - This directive, if non-null, is used to declare a + /// global or function as having protected visibility. + const char *ProtectedDirective; // Defaults to "\t.protected\t". + + //===--- Dwarf Emission Directives -----------------------------------===// + + /// AbsoluteDebugSectionOffsets - True if we should emit abolute section + /// offsets for debug information. Defaults to false. + bool AbsoluteDebugSectionOffsets; + + /// AbsoluteEHSectionOffsets - True if we should emit abolute section + /// offsets for EH information. Defaults to false. + bool AbsoluteEHSectionOffsets; + + /// HasLEB128 - True if target asm supports leb128 directives. 
+ /// + bool HasLEB128; // Defaults to false. + + /// hasDotLocAndDotFile - True if target asm supports .loc and .file + /// directives for emitting debugging information. + /// + bool HasDotLocAndDotFile; // Defaults to false. + + /// SupportsDebugInformation - True if target supports emission of debugging + /// information. + bool SupportsDebugInformation; + + /// SupportsExceptionHandling - True if target supports + /// exception handling. + /// + bool SupportsExceptionHandling; // Defaults to false. + + /// RequiresFrameSection - true if the Dwarf2 output needs a frame section + /// + bool DwarfRequiresFrameSection; // Defaults to true. + + /// DwarfUsesInlineInfoSection - True if DwarfDebugInlineSection is used to + /// encode inline subroutine information. + bool DwarfUsesInlineInfoSection; // Defaults to false. + + /// SupportsMacInfo - true if the Dwarf output supports macro information + /// + bool SupportsMacInfoSection; // Defaults to true + + /// NonLocalEHFrameLabel - If set, the EH_frame label needs to be non-local. + /// + bool NonLocalEHFrameLabel; // Defaults to false. + + /// GlobalEHDirective - This is the directive used to make exception frame + /// tables globally visible. + /// + const char *GlobalEHDirective; // Defaults to NULL. + + /// SupportsWeakEmptyEHFrame - True if target assembler and linker will + /// handle a weak_definition of constant 0 for an omitted EH frame. + bool SupportsWeakOmittedEHFrame; // Defaults to true. + + /// DwarfSectionOffsetDirective - Special section offset directive. + const char* DwarfSectionOffsetDirective; // Defaults to NULL + + /// DwarfAbbrevSection - Section directive for Dwarf abbrev. + /// + const char *DwarfAbbrevSection; // Defaults to ".debug_abbrev". + + /// DwarfInfoSection - Section directive for Dwarf info. + /// + const char *DwarfInfoSection; // Defaults to ".debug_info". + + /// DwarfLineSection - Section directive for Dwarf info. + /// + const char *DwarfLineSection; // Defaults to ".debug_line". + + /// DwarfFrameSection - Section directive for Dwarf info. + /// + const char *DwarfFrameSection; // Defaults to ".debug_frame". + + /// DwarfPubNamesSection - Section directive for Dwarf info. + /// + const char *DwarfPubNamesSection; // Defaults to ".debug_pubnames". + + /// DwarfPubTypesSection - Section directive for Dwarf info. + /// + const char *DwarfPubTypesSection; // Defaults to ".debug_pubtypes". + + /// DwarfDebugInlineSection - Section directive for inline info. + /// + const char *DwarfDebugInlineSection; // Defaults to ".debug_inlined" + + /// DwarfStrSection - Section directive for Dwarf info. + /// + const char *DwarfStrSection; // Defaults to ".debug_str". + + /// DwarfLocSection - Section directive for Dwarf info. + /// + const char *DwarfLocSection; // Defaults to ".debug_loc". + + /// DwarfARangesSection - Section directive for Dwarf info. + /// + const char *DwarfARangesSection; // Defaults to ".debug_aranges". + + /// DwarfRangesSection - Section directive for Dwarf info. + /// + const char *DwarfRangesSection; // Defaults to ".debug_ranges". + + /// DwarfMacInfoSection - Section directive for Dwarf info. + /// + const char *DwarfMacInfoSection; // Defaults to ".debug_macinfo". + + /// DwarfEHFrameSection - Section directive for Exception frames. + /// + const char *DwarfEHFrameSection; // Defaults to ".eh_frame". + + /// DwarfExceptionSection - Section directive for Exception table. + /// + const char *DwarfExceptionSection; // Defaults to ".gcc_except_table". 
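+    // Editor's illustration (not part of the original header): a concrete
+    // target is expected to override these protected defaults in the
+    // constructor of its TargetAsmInfo subclass.  For a hypothetical target
+    // "MyTarget", that might look roughly like:
+    //
+    //   MyTargetAsmInfo::MyTargetAsmInfo(const TargetMachine &TM)
+    //       : TargetAsmInfo(TM) {
+    //     CommentString      = ";";
+    //     GlobalPrefix       = "_";
+    //     AlignmentIsInBytes = false;
+    //     DwarfAbbrevSection = "\t.section\t.debug_abbrev";
+    //   }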
+ + //===--- CBE Asm Translation Table -----------------------------------===// + + const char *const *AsmTransCBE; // Defaults to empty + + public: + explicit TargetAsmInfo(const TargetMachine &TM); + virtual ~TargetAsmInfo(); + + const Section* getNamedSection(const char *Name, + unsigned Flags = SectionFlags::None, + bool Override = false) const; + const Section* getUnnamedSection(const char *Directive, + unsigned Flags = SectionFlags::None, + bool Override = false) const; + + /// Measure the specified inline asm to determine an approximation of its + /// length. + virtual unsigned getInlineAsmLength(const char *Str) const; + + /// ExpandInlineAsm - This hook allows the target to expand an inline asm + /// call to be explicit llvm code if it wants to. This is useful for + /// turning simple inline asms into LLVM intrinsics, which gives the + /// compiler more information about the behavior of the code. + virtual bool ExpandInlineAsm(CallInst *CI) const { + return false; + } + + /// emitUsedDirectiveFor - This hook allows targets to selectively decide + /// not to emit the UsedDirective for some symbols in llvm.used. + virtual bool emitUsedDirectiveFor(const GlobalValue *GV, + Mangler *Mang) const { + return (GV!=0); + } + + /// PreferredEHDataFormat - This hook allows the target to select data + /// format used for encoding pointers in exception handling data. Reason is + /// 0 for data, 1 for code labels, 2 for function pointers. Global is true + /// if the symbol can be relocated. + virtual unsigned PreferredEHDataFormat(DwarfEncoding::Target Reason, + bool Global) const; + + /// SectionKindForGlobal - This hook allows the target to select proper + /// section kind used for global emission. + virtual SectionKind::Kind + SectionKindForGlobal(const GlobalValue *GV) const; + + /// RelocBehaviour - Describes how relocations should be treated when + /// selecting sections. Reloc::Global bit should be set if global + /// relocations should force object to be placed in read-write + /// sections. Reloc::Local bit should be set if local relocations should + /// force object to be placed in read-write sections. + virtual unsigned RelocBehaviour() const; + + /// SectionFlagsForGlobal - This hook allows the target to select proper + /// section flags either for given global or for section. + virtual unsigned + SectionFlagsForGlobal(const GlobalValue *GV = NULL, + const char* name = NULL) const; + + /// SectionForGlobal - This hooks returns proper section name for given + /// global with all necessary flags and marks. + virtual const Section* SectionForGlobal(const GlobalValue *GV) const; + + // Helper methods for SectionForGlobal + virtual std::string UniqueSectionForGlobal(const GlobalValue* GV, + SectionKind::Kind kind) const; + + const std::string& getSectionFlags(unsigned Flags) const; + virtual std::string printSectionFlags(unsigned flags) const { return ""; } + + virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const; + + virtual const Section* SelectSectionForMachineConst(const Type *Ty) const; + + /// getSLEB128Size - Compute the number of bytes required for a signed + /// leb128 value. + + static unsigned getSLEB128Size(int Value); + + /// getULEB128Size - Compute the number of bytes required for an unsigned + /// leb128 value. + + static unsigned getULEB128Size(unsigned Value); + + // Data directive accessors + // + const char *getData8bitsDirective(unsigned AS = 0) const { + return AS == 0 ? 
Data8bitsDirective : getASDirective(8, AS); + } + const char *getData16bitsDirective(unsigned AS = 0) const { + return AS == 0 ? Data16bitsDirective : getASDirective(16, AS); + } + const char *getData32bitsDirective(unsigned AS = 0) const { + return AS == 0 ? Data32bitsDirective : getASDirective(32, AS); + } + const char *getData64bitsDirective(unsigned AS = 0) const { + return AS == 0 ? Data64bitsDirective : getASDirective(64, AS); + } + + + // Accessors. + // + const Section *getTextSection() const { + return TextSection; + } + const Section *getDataSection() const { + return DataSection; + } + const char *getBSSSection() const { + return BSSSection; + } + const Section *getBSSSection_() const { + return BSSSection_; + } + const Section *getReadOnlySection() const { + return ReadOnlySection; + } + const Section *getSmallDataSection() const { + return SmallDataSection; + } + const Section *getSmallBSSSection() const { + return SmallBSSSection; + } + const Section *getSmallRODataSection() const { + return SmallRODataSection; + } + const Section *getTLSDataSection() const { + return TLSDataSection; + } + const Section *getTLSBSSSection() const { + return TLSBSSSection; + } + const char *getZeroFillDirective() const { + return ZeroFillDirective; + } + const char *getNonexecutableStackDirective() const { + return NonexecutableStackDirective; + } + bool needsSet() const { + return NeedsSet; + } + const char *getPCSymbol() const { + return PCSymbol; + } + char getSeparatorChar() const { + return SeparatorChar; + } + const char *getCommentString() const { + return CommentString; + } + const char *getGlobalPrefix() const { + return GlobalPrefix; + } + const char *getPrivateGlobalPrefix() const { + return PrivateGlobalPrefix; + } + /// EHGlobalPrefix - Prefix for EH_frame and the .eh symbols. + /// This is normally PrivateGlobalPrefix, but some targets want + /// these symbols to be visible. 
+ virtual const char *getEHGlobalPrefix() const { + return PrivateGlobalPrefix; + } + const char *getLessPrivateGlobalPrefix() const { + return LessPrivateGlobalPrefix; + } + const char *getJumpTableSpecialLabelPrefix() const { + return JumpTableSpecialLabelPrefix; + } + const char *getGlobalVarAddrPrefix() const { + return GlobalVarAddrPrefix; + } + const char *getGlobalVarAddrSuffix() const { + return GlobalVarAddrSuffix; + } + const char *getFunctionAddrPrefix() const { + return FunctionAddrPrefix; + } + const char *getFunctionAddrSuffix() const { + return FunctionAddrSuffix; + } + const char *getPersonalityPrefix() const { + return PersonalityPrefix; + } + const char *getPersonalitySuffix() const { + return PersonalitySuffix; + } + bool getNeedsIndirectEncoding() const { + return NeedsIndirectEncoding; + } + const char *getInlineAsmStart() const { + return InlineAsmStart; + } + const char *getInlineAsmEnd() const { + return InlineAsmEnd; + } + unsigned getAssemblerDialect() const { + return AssemblerDialect; + } + const char *getStringConstantPrefix() const { + return StringConstantPrefix; + } + const char *getZeroDirective() const { + return ZeroDirective; + } + const char *getZeroDirectiveSuffix() const { + return ZeroDirectiveSuffix; + } + const char *getAsciiDirective() const { + return AsciiDirective; + } + const char *getAscizDirective() const { + return AscizDirective; + } + const char *getJumpTableDirective() const { + return JumpTableDirective; + } + const char *getAlignDirective() const { + return AlignDirective; + } + bool getAlignmentIsInBytes() const { + return AlignmentIsInBytes; + } + unsigned getTextAlignFillValue() const { + return TextAlignFillValue; + } + const char *getSwitchToSectionDirective() const { + return SwitchToSectionDirective; + } + const char *getTextSectionStartSuffix() const { + return TextSectionStartSuffix; + } + const char *getDataSectionStartSuffix() const { + return DataSectionStartSuffix; + } + const char *getSectionEndDirectiveSuffix() const { + return SectionEndDirectiveSuffix; + } + const char *getConstantPoolSection() const { + return ConstantPoolSection; + } + const char *getJumpTableDataSection() const { + return JumpTableDataSection; + } + const char *getCStringSection() const { + return CStringSection; + } + const Section *getCStringSection_() const { + return CStringSection_; + } + const char *getStaticCtorsSection() const { + return StaticCtorsSection; + } + const char *getStaticDtorsSection() const { + return StaticDtorsSection; + } + const char *getGlobalDirective() const { + return GlobalDirective; + } + const char *getExternDirective() const { + return ExternDirective; + } + const char *getSetDirective() const { + return SetDirective; + } + const char *getLCOMMDirective() const { + return LCOMMDirective; + } + const char *getCOMMDirective() const { + return COMMDirective; + } + bool getCOMMDirectiveTakesAlignment() const { + return COMMDirectiveTakesAlignment; + } + bool hasDotTypeDotSizeDirective() const { + return HasDotTypeDotSizeDirective; + } + bool hasSingleParameterDotFile() const { + return HasSingleParameterDotFile; + } + const char *getUsedDirective() const { + return UsedDirective; + } + const char *getWeakRefDirective() const { + return WeakRefDirective; + } + const char *getWeakDefDirective() const { + return WeakDefDirective; + } + const char *getHiddenDirective() const { + return HiddenDirective; + } + const char *getProtectedDirective() const { + return ProtectedDirective; + } + bool isAbsoluteDebugSectionOffsets() 
const { + return AbsoluteDebugSectionOffsets; + } + bool isAbsoluteEHSectionOffsets() const { + return AbsoluteEHSectionOffsets; + } + bool hasLEB128() const { + return HasLEB128; + } + bool hasDotLocAndDotFile() const { + return HasDotLocAndDotFile; + } + bool doesSupportDebugInformation() const { + return SupportsDebugInformation; + } + bool doesSupportExceptionHandling() const { + return SupportsExceptionHandling; + } + bool doesDwarfRequireFrameSection() const { + return DwarfRequiresFrameSection; + } + bool doesDwarfUsesInlineInfoSection() const { + return DwarfUsesInlineInfoSection; + } + bool doesSupportMacInfoSection() const { + return SupportsMacInfoSection; + } + bool doesRequireNonLocalEHFrameLabel() const { + return NonLocalEHFrameLabel; + } + const char *getGlobalEHDirective() const { + return GlobalEHDirective; + } + bool getSupportsWeakOmittedEHFrame() const { + return SupportsWeakOmittedEHFrame; + } + const char *getDwarfSectionOffsetDirective() const { + return DwarfSectionOffsetDirective; + } + const char *getDwarfAbbrevSection() const { + return DwarfAbbrevSection; + } + const char *getDwarfInfoSection() const { + return DwarfInfoSection; + } + const char *getDwarfLineSection() const { + return DwarfLineSection; + } + const char *getDwarfFrameSection() const { + return DwarfFrameSection; + } + const char *getDwarfPubNamesSection() const { + return DwarfPubNamesSection; + } + const char *getDwarfPubTypesSection() const { + return DwarfPubTypesSection; + } + const char *getDwarfDebugInlineSection() const { + return DwarfDebugInlineSection; + } + const char *getDwarfStrSection() const { + return DwarfStrSection; + } + const char *getDwarfLocSection() const { + return DwarfLocSection; + } + const char *getDwarfARangesSection() const { + return DwarfARangesSection; + } + const char *getDwarfRangesSection() const { + return DwarfRangesSection; + } + const char *getDwarfMacInfoSection() const { + return DwarfMacInfoSection; + } + const char *getDwarfEHFrameSection() const { + return DwarfEHFrameSection; + } + const char *getDwarfExceptionSection() const { + return DwarfExceptionSection; + } + const char *const *getAsmCBE() const { + return AsmTransCBE; + } + }; +} + +#endif diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td new file mode 100644 index 0000000..224c08e --- /dev/null +++ b/include/llvm/Target/TargetCallingConv.td @@ -0,0 +1,114 @@ +//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the target-independent interfaces with which targets +// describe their calling conventions. +// +//===----------------------------------------------------------------------===// + +class CCAction; +class CallingConv; + +/// CCCustom - Calls a custom arg handling function. +class CCCustom<string fn> : CCAction { + string FuncName = fn; +} + +/// CCPredicateAction - Instances of this class check some predicate, then +/// delegate to another action if the predicate is true. +class CCPredicateAction<CCAction A> : CCAction { + CCAction SubAction = A; +} + +/// CCIfType - If the current argument is one of the specified types, apply +/// Action A. 
+class CCIfType<list<ValueType> vts, CCAction A> : CCPredicateAction<A> { + list<ValueType> VTs = vts; +} + +/// CCIf - If the predicate matches, apply A. +class CCIf<string predicate, CCAction A> : CCPredicateAction<A> { + string Predicate = predicate; +} + +/// CCIfByVal - If the current argument has ByVal parameter attribute, apply +/// Action A. +class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> { +} + +/// CCIfCC - Match of the current calling convention is 'CC'. +class CCIfCC<string CC, CCAction A> + : CCIf<!strconcat("State.getCallingConv() == ", CC), A> {} + +/// CCIfInReg - If this argument is marked with the 'inreg' attribute, apply +/// the specified action. +class CCIfInReg<CCAction A> : CCIf<"ArgFlags.isInReg()", A> {} + +/// CCIfNest - If this argument is marked with the 'nest' attribute, apply +/// the specified action. +class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {} + +/// CCIfNotVarArg - If the current function is not vararg - apply the action +class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {} + +/// CCAssignToReg - This action matches if there is a register in the specified +/// list that is still available. If so, it assigns the value to the first +/// available register and succeeds. +class CCAssignToReg<list<Register> regList> : CCAction { + list<Register> RegList = regList; +} + +/// CCAssignToRegWithShadow - Same as CCAssignToReg, but with list of registers +/// which became shadowed, when some register is used. +class CCAssignToRegWithShadow<list<Register> regList, + list<Register> shadowList> : CCAction { + list<Register> RegList = regList; + list<Register> ShadowRegList = shadowList; +} + +/// CCAssignToStack - This action always matches: it assigns the value to a +/// stack slot of the specified size and alignment on the stack. If size is +/// zero then the ABI size is used; if align is zero then the ABI alignment +/// is used - these may depend on the target or subtarget. +class CCAssignToStack<int size, int align> : CCAction { + int Size = size; + int Align = align; +} + +/// CCPassByVal - This action always matches: it assigns the value to a stack +/// slot to implement ByVal aggregate parameter passing. Size and alignment +/// specify the minimum size and alignment for the stack slot. +class CCPassByVal<int size, int align> : CCAction { + int Size = size; + int Align = align; +} + +/// CCPromoteToType - If applied, this promotes the specified current value to +/// the specified type. +class CCPromoteToType<ValueType destTy> : CCAction { + ValueType DestTy = destTy; +} + +/// CCBitConvertToType - If applied, this bitconverts the specified current +/// value to the specified type. +class CCBitConvertToType<ValueType destTy> : CCAction { + ValueType DestTy = destTy; +} + +/// CCDelegateTo - This action invokes the specified sub-calling-convention. It +/// is successful if the specified CC matches. +class CCDelegateTo<CallingConv cc> : CCAction { + CallingConv CC = cc; +} + +/// CallingConv - An instance of this is used to define each calling convention +/// that the target supports. 
+class CallingConv<list<CCAction> actions> { + list<CCAction> Actions = actions; +} diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h new file mode 100644 index 0000000..82abfc7 --- /dev/null +++ b/include/llvm/Target/TargetData.h @@ -0,0 +1,317 @@ +//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines target properties related to datatype size/offset/alignment +// information. It uses lazy annotations to cache information about how +// structure types are laid out and used. +// +// This structure should be created once, filled in if the defaults are not +// correct and then passed around by const&. None of the members functions +// require modification to the object. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETDATA_H +#define LLVM_TARGET_TARGETDATA_H + +#include "llvm/Pass.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/SmallVector.h" +#include <string> + +namespace llvm { + +class Value; +class Type; +class IntegerType; +class StructType; +class StructLayout; +class GlobalVariable; + +/// Enum used to categorize the alignment types stored by TargetAlignElem +enum AlignTypeEnum { + INTEGER_ALIGN = 'i', ///< Integer type alignment + VECTOR_ALIGN = 'v', ///< Vector type alignment + FLOAT_ALIGN = 'f', ///< Floating point type alignment + AGGREGATE_ALIGN = 'a', ///< Aggregate alignment + STACK_ALIGN = 's' ///< Stack objects alignment +}; +/// Target alignment element. +/// +/// Stores the alignment data associated with a given alignment type (pointer, +/// integer, vector, float) and type bit width. +/// +/// @note The unusual order of elements in the structure attempts to reduce +/// padding and make the structure slightly more cache friendly. +struct TargetAlignElem { + AlignTypeEnum AlignType : 8; //< Alignment type (AlignTypeEnum) + unsigned char ABIAlign; //< ABI alignment for this type/bitw + unsigned char PrefAlign; //< Pref. alignment for this type/bitw + uint32_t TypeBitWidth; //< Type bit width + + /// Initializer + static TargetAlignElem get(AlignTypeEnum align_type, unsigned char abi_align, + unsigned char pref_align, uint32_t bit_width); + /// Equality predicate + bool operator==(const TargetAlignElem &rhs) const; + /// output stream operator + std::ostream &dump(std::ostream &os) const; +}; + +class TargetData : public ImmutablePass { +private: + bool LittleEndian; ///< Defaults to false + unsigned char PointerMemSize; ///< Pointer size in bytes + unsigned char PointerABIAlign; ///< Pointer ABI alignment + unsigned char PointerPrefAlign; ///< Pointer preferred alignment + + //! Where the primitive type alignment data is stored. + /*! + @sa init(). + @note Could support multiple size pointer alignments, e.g., 32-bit pointers + vs. 64-bit pointers by extending TargetAlignment, but for now, we don't. + */ + SmallVector<TargetAlignElem, 16> Alignments; + //! Alignment iterator shorthand + typedef SmallVector<TargetAlignElem, 16>::iterator align_iterator; + //! Constant alignment iterator shorthand + typedef SmallVector<TargetAlignElem, 16>::const_iterator align_const_iterator; + //! Invalid alignment. + /*! 
+ This member is a signal that a requested alignment type and bit width were + not found in the SmallVector. + */ + static const TargetAlignElem InvalidAlignmentElem; + + //! Set/initialize target alignments + void setAlignment(AlignTypeEnum align_type, unsigned char abi_align, + unsigned char pref_align, uint32_t bit_width); + unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width, + bool ABIAlign, const Type *Ty) const; + //! Internal helper method that returns requested alignment for type. + unsigned char getAlignment(const Type *Ty, bool abi_or_pref) const; + + /// Valid alignment predicate. + /// + /// Predicate that tests a TargetAlignElem reference returned by get() against + /// InvalidAlignmentElem. + inline bool validAlignment(const TargetAlignElem &align) const { + return (&align != &InvalidAlignmentElem); + } + +public: + /// Default ctor. + /// + /// @note This has to exist, because this is a pass, but it should never be + /// used. + TargetData() : ImmutablePass(&ID) { + assert(0 && "ERROR: Bad TargetData ctor used. " + "Tool did not specify a TargetData to use?"); + abort(); + } + + /// Constructs a TargetData from a specification string. See init(). + explicit TargetData(const std::string &TargetDescription) + : ImmutablePass(&ID) { + init(TargetDescription); + } + + /// Initialize target data from properties stored in the module. + explicit TargetData(const Module *M); + + TargetData(const TargetData &TD) : + ImmutablePass(&ID), + LittleEndian(TD.isLittleEndian()), + PointerMemSize(TD.PointerMemSize), + PointerABIAlign(TD.PointerABIAlign), + PointerPrefAlign(TD.PointerPrefAlign), + Alignments(TD.Alignments) + { } + + ~TargetData(); // Not virtual, do not subclass this class + + //! Parse a target data layout string and initialize TargetData alignments. + void init(const std::string &TargetDescription); + + /// Target endianness... + bool isLittleEndian() const { return LittleEndian; } + bool isBigEndian() const { return !LittleEndian; } + + /// getStringRepresentation - Return the string representation of the + /// TargetData. This representation is in the same format accepted by the + /// string constructor above. + std::string getStringRepresentation() const; + /// Target pointer alignment + unsigned char getPointerABIAlignment() const { return PointerABIAlign; } + /// Return target's alignment for stack-based pointers + unsigned char getPointerPrefAlignment() const { return PointerPrefAlign; } + /// Target pointer size + unsigned char getPointerSize() const { return PointerMemSize; } + /// Target pointer size, in bits + unsigned char getPointerSizeInBits() const { return 8*PointerMemSize; } + + /// Size examples: + /// + /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*] + /// ---- ---------- --------------- --------------- + /// i1 1 8 8 + /// i8 8 8 8 + /// i19 19 24 32 + /// i32 32 32 32 + /// i100 100 104 128 + /// i128 128 128 128 + /// Float 32 32 32 + /// Double 64 64 64 + /// X86_FP80 80 80 96 + /// + /// [*] The alloc size depends on the alignment, and thus on the target. + /// These values are for x86-32 linux. + + /// getTypeSizeInBits - Return the number of bits necessary to hold the + /// specified type. For example, returns 36 for i36 and 80 for x86_fp80. + uint64_t getTypeSizeInBits(const Type* Ty) const; + + /// getTypeStoreSize - Return the maximum number of bytes that may be + /// overwritten by storing the specified type. For example, returns 5 + /// for i36 and 10 for x86_fp80. 
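+  ///
+  /// The size queries relate to each other roughly as in the following sketch
+  /// (the data layout string and the resulting numbers are merely an example,
+  /// not a statement about any particular target):
+  ///
+  ///   TargetData TD("e-p:32:32:32");           // little-endian, 32-bit pointers
+  ///   const Type *Ty = IntegerType::get(36);   // an i36
+  ///   TD.getTypeSizeInBits(Ty);                // 36
+  ///   TD.getTypeStoreSize(Ty);                 // 5 (36 bits rounded up to bytes)
+  ///   TD.getTypeAllocSize(Ty);                 // store size rounded up to the
+  ///                                            // type's ABI alignment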
+ uint64_t getTypeStoreSize(const Type *Ty) const { + return (getTypeSizeInBits(Ty)+7)/8; + } + + /// getTypeStoreSizeInBits - Return the maximum number of bits that may be + /// overwritten by storing the specified type; always a multiple of 8. For + /// example, returns 40 for i36 and 80 for x86_fp80. + uint64_t getTypeStoreSizeInBits(const Type *Ty) const { + return 8*getTypeStoreSize(Ty); + } + + /// getTypeAllocSize - Return the offset in bytes between successive objects + /// of the specified type, including alignment padding. This is the amount + /// that alloca reserves for this type. For example, returns 12 or 16 for + /// x86_fp80, depending on alignment. + uint64_t getTypeAllocSize(const Type* Ty) const { + // Round up to the next alignment boundary. + return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty)); + } + + /// getTypeAllocSizeInBits - Return the offset in bits between successive + /// objects of the specified type, including alignment padding; always a + /// multiple of 8. This is the amount that alloca reserves for this type. + /// For example, returns 96 or 128 for x86_fp80, depending on alignment. + uint64_t getTypeAllocSizeInBits(const Type* Ty) const { + return 8*getTypeAllocSize(Ty); + } + + /// getABITypeAlignment - Return the minimum ABI-required alignment for the + /// specified type. + unsigned char getABITypeAlignment(const Type *Ty) const; + + /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment + /// for the specified type when it is part of a call frame. + unsigned char getCallFrameTypeAlignment(const Type *Ty) const; + + + /// getPrefTypeAlignment - Return the preferred stack/global alignment for + /// the specified type. This is always at least as good as the ABI alignment. + unsigned char getPrefTypeAlignment(const Type *Ty) const; + + /// getPreferredTypeAlignmentShift - Return the preferred alignment for the + /// specified type, returned as log2 of the value (a shift amount). + /// + unsigned char getPreferredTypeAlignmentShift(const Type *Ty) const; + + /// getIntPtrType - Return an unsigned integer type that is the same size or + /// greater to the host pointer size. + /// + const IntegerType *getIntPtrType() const; + + /// getIndexedOffset - return the offset from the beginning of the type for + /// the specified indices. This is used to implement getelementptr. + /// + uint64_t getIndexedOffset(const Type *Ty, + Value* const* Indices, unsigned NumIndices) const; + + /// getStructLayout - Return a StructLayout object, indicating the alignment + /// of the struct, its size, and the offsets of its fields. Note that this + /// information is lazily cached. + const StructLayout *getStructLayout(const StructType *Ty) const; + + /// InvalidateStructLayoutInfo - TargetData speculatively caches StructLayout + /// objects. If a TargetData object is alive when types are being refined and + /// removed, this method must be called whenever a StructType is removed to + /// avoid a dangling pointer in this cache. + void InvalidateStructLayoutInfo(const StructType *Ty) const; + + /// getPreferredAlignment - Return the preferred alignment of the specified + /// global. This includes an explicitly requested alignment (if the global + /// has one). + unsigned getPreferredAlignment(const GlobalVariable *GV) const; + + /// getPreferredAlignmentLog - Return the preferred alignment of the + /// specified global, returned in log form. This includes an explicitly + /// requested alignment (if the global has one). 
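+  ///
+  /// Since alignments are powers of two, the two accessors are expected to be
+  /// related by (sketch):
+  ///
+  ///   TD.getPreferredAlignment(GV) == (1u << TD.getPreferredAlignmentLog(GV))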
+ unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const; + + /// RoundUpAlignment - Round the specified value up to the next alignment + /// boundary specified by Alignment. For example, 7 rounded up to an + /// alignment boundary of 4 is 8. 8 rounded up to the alignment boundary of 4 + /// is 8 because it is already aligned. + template <typename UIntTy> + static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) { + assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!"); + return (Val + (Alignment-1)) & ~UIntTy(Alignment-1); + } + + static char ID; // Pass identification, replacement for typeid +}; + +/// StructLayout - used to lazily calculate structure layout information for a +/// target machine, based on the TargetData structure. +/// +class StructLayout { + uint64_t StructSize; + unsigned StructAlignment; + unsigned NumElements; + uint64_t MemberOffsets[1]; // variable sized array! +public: + + uint64_t getSizeInBytes() const { + return StructSize; + } + + uint64_t getSizeInBits() const { + return 8*StructSize; + } + + unsigned getAlignment() const { + return StructAlignment; + } + + /// getElementContainingOffset - Given a valid byte offset into the structure, + /// return the structure index that contains it. + /// + unsigned getElementContainingOffset(uint64_t Offset) const; + + uint64_t getElementOffset(unsigned Idx) const { + assert(Idx < NumElements && "Invalid element idx!"); + return MemberOffsets[Idx]; + } + + uint64_t getElementOffsetInBits(unsigned Idx) const { + return getElementOffset(Idx)*8; + } + +private: + friend class TargetData; // Only TargetData can create this class + StructLayout(const StructType *ST, const TargetData &TD); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetELFWriterInfo.h b/include/llvm/Target/TargetELFWriterInfo.h new file mode 100644 index 0000000..548cc07 --- /dev/null +++ b/include/llvm/Target/TargetELFWriterInfo.h @@ -0,0 +1,41 @@ +//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetELFWriterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H +#define LLVM_TARGET_TARGETELFWRITERINFO_H + +namespace llvm { + + //===--------------------------------------------------------------------===// + // TargetELFWriterInfo + //===--------------------------------------------------------------------===// + + class TargetELFWriterInfo { + // EMachine - This field is the target specific value to emit as the + // e_machine member of the ELF header. 
+ unsigned short EMachine; + public: + enum MachineType { + NoMachine, + EM_386 = 3 + }; + + explicit TargetELFWriterInfo(MachineType machine) : EMachine(machine) {} + virtual ~TargetELFWriterInfo() {} + + unsigned short getEMachine() const { return EMachine; } + }; + +} // end llvm namespace + +#endif // LLVM_TARGET_TARGETELFWRITERINFO_H diff --git a/include/llvm/Target/TargetFrameInfo.h b/include/llvm/Target/TargetFrameInfo.h new file mode 100644 index 0000000..3e26b9d --- /dev/null +++ b/include/llvm/Target/TargetFrameInfo.h @@ -0,0 +1,80 @@ +//===-- llvm/Target/TargetFrameInfo.h ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Interface to describe the layout of a stack frame on the target machine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETFRAMEINFO_H +#define LLVM_TARGET_TARGETFRAMEINFO_H + +#include <utility> + +namespace llvm { + +/// Information about stack frame layout on the target. It holds the direction +/// of stack growth, the known stack alignment on entry to each function, and +/// the offset to the locals area. +/// +/// The offset to the local area is the offset from the stack pointer on +/// function entry to the first location where function data (local variables, +/// spill locations) can be stored. +class TargetFrameInfo { +public: + enum StackDirection { + StackGrowsUp, // Adding to the stack increases the stack address + StackGrowsDown // Adding to the stack decreases the stack address + }; +private: + StackDirection StackDir; + unsigned StackAlignment; + int LocalAreaOffset; +public: + TargetFrameInfo(StackDirection D, unsigned StackAl, int LAO) + : StackDir(D), StackAlignment(StackAl), LocalAreaOffset(LAO) {} + + virtual ~TargetFrameInfo(); + + // These methods return information that describes the abstract stack layout + // of the target machine. + + /// getStackGrowthDirection - Return the direction the stack grows + /// + StackDirection getStackGrowthDirection() const { return StackDir; } + + /// getStackAlignment - This method returns the number of bytes that the stack + /// pointer must be aligned to. Typically, this is the largest alignment for + /// any data object in the target. + /// + unsigned getStackAlignment() const { return StackAlignment; } + + /// getOffsetOfLocalArea - This method returns the offset of the local area + /// from the stack pointer on entrance to a function. + /// + int getOffsetOfLocalArea() const { return LocalAreaOffset; } + + /// getCalleeSavedSpillSlots - This method returns a pointer to an array of + /// pairs, that contains an entry for each callee saved register that must be + /// spilled to a particular stack location if it is spilled. + /// + /// Each entry in this array contains a <register,offset> pair, indicating the + /// fixed offset from the incoming stack pointer that each register should be + /// spilled at. If a register is not listed here, the code generator is + /// allowed to spill it anywhere it chooses. 
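+  /// For example, a target with two callee-saved registers pinned to fixed
+  /// slots might implement this roughly as follows (an illustrative sketch;
+  /// MYTGT::R4, MYTGT::R5 and the offsets are made-up values):
+  ///
+  ///   static const std::pair<unsigned, int> SpillSlots[] = {
+  ///     std::make_pair(MYTGT::R4, -8),
+  ///     std::make_pair(MYTGT::R5, -12)
+  ///   };
+  ///   NumEntries = sizeof(SpillSlots) / sizeof(SpillSlots[0]);
+  ///   return SpillSlots;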
+ /// + virtual const std::pair<unsigned, int> * + getCalleeSavedSpillSlots(unsigned &NumEntries) const { + NumEntries = 0; + return 0; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h new file mode 100644 index 0000000..622a216 --- /dev/null +++ b/include/llvm/Target/TargetInstrDesc.h @@ -0,0 +1,435 @@ +//===-- llvm/Target/TargetInstrDesc.h - Instruction Descriptors -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetOperandInfo and TargetInstrDesc classes, which +// are used to describe target instructions and their operands. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINSTRDESC_H +#define LLVM_TARGET_TARGETINSTRDESC_H + +namespace llvm { + +class TargetRegisterClass; + +//===----------------------------------------------------------------------===// +// Machine Operand Flags and Description +//===----------------------------------------------------------------------===// + +namespace TOI { + // Operand constraints: only "tied_to" for now. + enum OperandConstraint { + TIED_TO = 0 // Must be allocated the same register as. + }; + + /// OperandFlags - These are flags set on operands, but should be considered + /// private, all access should go through the TargetOperandInfo accessors. + /// See the accessors for a description of what these are. + enum OperandFlags { + LookupPtrRegClass = 0, + Predicate, + OptionalDef + }; +} + +/// TargetOperandInfo - This holds information about one operand of a machine +/// instruction, indicating the register class for register operands, etc. +/// +class TargetOperandInfo { +public: + /// RegClass - This specifies the register class enumeration of the operand + /// if the operand is a register. If not, this contains 0. + unsigned short RegClass; + unsigned short Flags; + /// Lower 16 bits are used to specify which constraints are set. The higher 16 + /// bits are used to specify the value of constraints (4 bits each). + unsigned int Constraints; + /// Currently no other information. + + /// isLookupPtrRegClass - Set if this operand is a pointer value and it + /// requires a callback to look up its register class. + bool isLookupPtrRegClass() const { return Flags&(1 <<TOI::LookupPtrRegClass);} + + /// isPredicate - Set if this is one of the operands that made up of + /// the predicate operand that controls an isPredicable() instruction. + bool isPredicate() const { return Flags & (1 << TOI::Predicate); } + + /// isOptionalDef - Set if this operand is a optional def. + /// + bool isOptionalDef() const { return Flags & (1 << TOI::OptionalDef); } +}; + + +//===----------------------------------------------------------------------===// +// Machine Instruction Flags and Description +//===----------------------------------------------------------------------===// + +/// TargetInstrDesc flags - These should be considered private to the +/// implementation of the TargetInstrDesc class. Clients should use the +/// predicate methods on TargetInstrDesc, not use these directly. These +/// all correspond to bitfields in the TargetInstrDesc::Flags field. 
+namespace TID { + enum { + Variadic = 0, + HasOptionalDef, + Return, + Call, + Barrier, + Terminator, + Branch, + IndirectBranch, + Predicable, + NotDuplicable, + DelaySlot, + FoldableAsLoad, + MayLoad, + MayStore, + UnmodeledSideEffects, + Commutable, + ConvertibleTo3Addr, + UsesCustomDAGSchedInserter, + Rematerializable, + CheapAsAMove + }; +} + +/// TargetInstrDesc - Describe properties that are true of each +/// instruction in the target description file. This captures information about +/// side effects, register use and many other things. There is one instance of +/// this struct for each target instruction class, and the MachineInstr class +/// points to this struct directly to describe itself. +class TargetInstrDesc { +public: + unsigned short Opcode; // The opcode number + unsigned short NumOperands; // Num of args (may be more if variable_ops) + unsigned short NumDefs; // Num of args that are definitions + unsigned short SchedClass; // enum identifying instr sched class + const char * Name; // Name of the instruction record in td file + unsigned Flags; // Flags identifying machine instr class + unsigned TSFlags; // Target Specific Flag values + const unsigned *ImplicitUses; // Registers implicitly read by this instr + const unsigned *ImplicitDefs; // Registers implicitly defined by this instr + const TargetRegisterClass **RCBarriers; // Reg classes completely "clobbered" + const TargetOperandInfo *OpInfo; // 'NumOperands' entries about operands + + /// getOperandConstraint - Returns the value of the specific constraint if + /// it is set. Returns -1 if it is not set. + int getOperandConstraint(unsigned OpNum, + TOI::OperandConstraint Constraint) const { + if (OpNum < NumOperands && + (OpInfo[OpNum].Constraints & (1 << Constraint))) { + unsigned Pos = 16 + Constraint * 4; + return (int)(OpInfo[OpNum].Constraints >> Pos) & 0xf; + } + return -1; + } + + /// getOpcode - Return the opcode number for this descriptor. + unsigned getOpcode() const { + return Opcode; + } + + /// getName - Return the name of the record in the .td file for this + /// instruction, for example "ADD8ri". + const char *getName() const { + return Name; + } + + /// getNumOperands - Return the number of declared MachineOperands for this + /// MachineInstruction. Note that variadic (isVariadic() returns true) + /// instructions may have additional operands at the end of the list, and note + /// that the machine instruction may include implicit register def/uses as + /// well. + unsigned getNumOperands() const { + return NumOperands; + } + + /// getNumDefs - Return the number of MachineOperands that are register + /// definitions. Register definitions always occur at the start of the + /// machine operand list. This is the number of "outs" in the .td file, + /// and does not include implicit defs. + unsigned getNumDefs() const { + return NumDefs; + } + + /// isVariadic - Return true if this instruction can have a variable number of + /// operands. In this case, the variable operands will be after the normal + /// operands but before the implicit definitions and uses (if any are + /// present). + bool isVariadic() const { + return Flags & (1 << TID::Variadic); + } + + /// hasOptionalDef - Set if this instruction has an optional definition, e.g. + /// ARM instructions which can set condition code if 's' bit is set. 
+ bool hasOptionalDef() const { + return Flags & (1 << TID::HasOptionalDef); + } + + /// getImplicitUses - Return a list of registers that are potentially + /// read by any instance of this machine instruction. For example, on X86, + /// the "adc" instruction adds two register operands and adds the carry bit in + /// from the flags register. In this case, the instruction is marked as + /// implicitly reading the flags. Likewise, the variable shift instruction on + /// X86 is marked as implicitly reading the 'CL' register, which it always + /// does. + /// + /// This method returns null if the instruction has no implicit uses. + const unsigned *getImplicitUses() const { + return ImplicitUses; + } + + /// getImplicitDefs - Return a list of registers that are potentially + /// written by any instance of this machine instruction. For example, on X86, + /// many instructions implicitly set the flags register. In this case, they + /// are marked as setting the FLAGS. Likewise, many instructions always + /// deposit their result in a physical register. For example, the X86 divide + /// instruction always deposits the quotient and remainder in the EAX/EDX + /// registers. For that instruction, this will return a list containing the + /// EAX/EDX/EFLAGS registers. + /// + /// This method returns null if the instruction has no implicit defs. + const unsigned *getImplicitDefs() const { + return ImplicitDefs; + } + + /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly + /// uses the specified physical register. + bool hasImplicitUseOfPhysReg(unsigned Reg) const { + if (const unsigned *ImpUses = ImplicitUses) + for (; *ImpUses; ++ImpUses) + if (*ImpUses == Reg) return true; + return false; + } + + /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly + /// defines the specified physical register. + bool hasImplicitDefOfPhysReg(unsigned Reg) const { + if (const unsigned *ImpDefs = ImplicitDefs) + for (; *ImpDefs; ++ImpDefs) + if (*ImpDefs == Reg) return true; + return false; + } + + /// getRegClassBarriers - Return a list of register classes that are + /// completely clobbered by this machine instruction. For example, on X86 + /// the call instructions will completely clobber all the registers in the + /// fp stack and XMM classes. + /// + /// This method returns null if the instruction doesn't completely clobber + /// any register class. + const TargetRegisterClass **getRegClassBarriers() const { + return RCBarriers; + } + + /// getSchedClass - Return the scheduling class for this instruction. The + /// scheduling class is an index into the InstrItineraryData table. This + /// returns zero if there is no known scheduling information for the + /// instruction. + /// + unsigned getSchedClass() const { + return SchedClass; + } + + bool isReturn() const { + return Flags & (1 << TID::Return); + } + + bool isCall() const { + return Flags & (1 << TID::Call); + } + + /// isBarrier - Returns true if the specified instruction stops control flow + /// from executing the instruction immediately following it. Examples include + /// unconditional branches and return instructions. + bool isBarrier() const { + return Flags & (1 << TID::Barrier); + } + + /// isTerminator - Returns true if this instruction part of the terminator for + /// a basic block. Typically this is things like return and branch + /// instructions. + /// + /// Various passes use this to insert code into the bottom of a basic block, + /// but before control flow occurs. 
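+  /// For example, such a pass can find its insertion point with a scan along
+  /// these lines (an illustrative sketch):
+  ///
+  ///   MachineBasicBlock::iterator IP = MBB.end();
+  ///   while (IP != MBB.begin() && prior(IP)->getDesc().isTerminator())
+  ///     --IP;
+  ///   // New code is inserted before IP: after the last non-terminator and
+  ///   // before the first terminator of the block.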
+ bool isTerminator() const { + return Flags & (1 << TID::Terminator); + } + + /// isBranch - Returns true if this is a conditional, unconditional, or + /// indirect branch. Predicates below can be used to discriminate between + /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to + /// get more information. + bool isBranch() const { + return Flags & (1 << TID::Branch); + } + + /// isIndirectBranch - Return true if this is an indirect branch, such as a + /// branch through a register. + bool isIndirectBranch() const { + return Flags & (1 << TID::IndirectBranch); + } + + /// isConditionalBranch - Return true if this is a branch which may fall + /// through to the next instruction or may transfer control flow to some other + /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more + /// information about this branch. + bool isConditionalBranch() const { + return isBranch() & !isBarrier() & !isIndirectBranch(); + } + + /// isUnconditionalBranch - Return true if this is a branch which always + /// transfers control flow to some other block. The + /// TargetInstrInfo::AnalyzeBranch method can be used to get more information + /// about this branch. + bool isUnconditionalBranch() const { + return isBranch() & isBarrier() & !isIndirectBranch(); + } + + // isPredicable - Return true if this instruction has a predicate operand that + // controls execution. It may be set to 'always', or may be set to other + /// values. There are various methods in TargetInstrInfo that can be used to + /// control and modify the predicate in this instruction. + bool isPredicable() const { + return Flags & (1 << TID::Predicable); + } + + /// isNotDuplicable - Return true if this instruction cannot be safely + /// duplicated. For example, if the instruction has a unique labels attached + /// to it, duplicating it would cause multiple definition errors. + bool isNotDuplicable() const { + return Flags & (1 << TID::NotDuplicable); + } + + /// hasDelaySlot - Returns true if the specified instruction has a delay slot + /// which must be filled by the code generator. + bool hasDelaySlot() const { + return Flags & (1 << TID::DelaySlot); + } + + /// canFoldAsLoad - Return true for instructions that can be folded as + /// memory operands in other instructions. The most common use for this + /// is instructions that are simple loads from memory that don't modify + /// the loaded value in any way, but it can also be used for instructions + /// that can be expressed as constant-pool loads, such as V_SETALLONES + /// on x86, to allow them to be folded when it is beneficial. + /// This should only be set on instructions that return a value in their + /// only virtual register definition. + bool canFoldAsLoad() const { + return Flags & (1 << TID::FoldableAsLoad); + } + + //===--------------------------------------------------------------------===// + // Side Effect Analysis + //===--------------------------------------------------------------------===// + + /// mayLoad - Return true if this instruction could possibly read memory. + /// Instructions with this flag set are not necessarily simple load + /// instructions, they may load a value and modify it, for example. + bool mayLoad() const { + return Flags & (1 << TID::MayLoad); + } + + + /// mayStore - Return true if this instruction could possibly modify memory. 
+ /// Instructions with this flag set are not necessarily simple store + /// instructions, they may store a modified value based on their operands, or + /// may not actually modify anything, for example. + bool mayStore() const { + return Flags & (1 << TID::MayStore); + } + + /// hasUnmodeledSideEffects - Return true if this instruction has side + /// effects that are not modeled by other flags. This does not return true + /// for instructions whose effects are captured by: + /// + /// 1. Their operand list and implicit definition/use list. Register use/def + /// info is explicit for instructions. + /// 2. Memory accesses. Use mayLoad/mayStore. + /// 3. Calling, branching, returning: use isCall/isReturn/isBranch. + /// + /// Examples of side effects would be modifying 'invisible' machine state like + /// a control register, flushing a cache, modifying a register invisible to + /// LLVM, etc. + /// + bool hasUnmodeledSideEffects() const { + return Flags & (1 << TID::UnmodeledSideEffects); + } + + //===--------------------------------------------------------------------===// + // Flags that indicate whether an instruction can be modified by a method. + //===--------------------------------------------------------------------===// + + /// isCommutable - Return true if this may be a 2- or 3-address + /// instruction (of the form "X = op Y, Z, ..."), which produces the same + /// result if Y and Z are exchanged. If this flag is set, then the + /// TargetInstrInfo::commuteInstruction method may be used to hack on the + /// instruction. + /// + /// Note that this flag may be set on instructions that are only commutable + /// sometimes. In these cases, the call to commuteInstruction will fail. + /// Also note that some instructions require non-trivial modification to + /// commute them. + bool isCommutable() const { + return Flags & (1 << TID::Commutable); + } + + /// isConvertibleTo3Addr - Return true if this is a 2-address instruction + /// which can be changed into a 3-address instruction if needed. Doing this + /// transformation can be profitable in the register allocator, because it + /// means that the instruction can use a 2-address form if possible, but + /// degrade into a less efficient form if the source and dest register cannot + /// be assigned to the same register. For example, this allows the x86 + /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which + /// is the same speed as the shift but has bigger code size. + /// + /// If this returns true, then the target must implement the + /// TargetInstrInfo::convertToThreeAddress method for this instruction, which + /// is allowed to fail if the transformation isn't valid for this specific + /// instruction (e.g. shl reg, 4 on x86). + /// + bool isConvertibleTo3Addr() const { + return Flags & (1 << TID::ConvertibleTo3Addr); + } + + /// usesCustomDAGSchedInsertionHook - Return true if this instruction requires + /// custom insertion support when the DAG scheduler is inserting it into a + /// machine basic block. If this is true for the instruction, it basically + /// means that it is a pseudo instruction used at SelectionDAG time that is + /// expanded out into magic code by the target when MachineInstrs are formed. + /// + /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method + /// is used to insert this into the MachineBasicBlock. 
+ bool usesCustomDAGSchedInsertionHook() const { + return Flags & (1 << TID::UsesCustomDAGSchedInserter); + } + + /// isRematerializable - Returns true if this instruction is a candidate for + /// remat. This flag is deprecated, please don't use it anymore. If this + /// flag is set, the isReallyTriviallyReMaterializable() method is called to + /// verify the instruction is really rematable. + bool isRematerializable() const { + return Flags & (1 << TID::Rematerializable); + } + + /// isAsCheapAsAMove - Returns true if this instruction has the same cost (or + /// less) than a move instruction. This is useful during certain types of + /// optimizations (e.g., remat during two-address conversion or machine licm) + /// where we would like to remat or hoist the instruction, but not if it costs + /// more than moving the instruction into the appropriate register. Note, we + /// are not marking copies from and to the same register class with this flag. + bool isAsCheapAsAMove() const { + return Flags & (1 << TID::CheapAsAMove); + } +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h new file mode 100644 index 0000000..ecdd682 --- /dev/null +++ b/include/llvm/Target/TargetInstrInfo.h @@ -0,0 +1,517 @@ +//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the target machine instruction set to the code generator. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINSTRINFO_H +#define LLVM_TARGET_TARGETINSTRINFO_H + +#include "llvm/Target/TargetInstrDesc.h" +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + +class TargetRegisterClass; +class TargetRegisterInfo; +class LiveVariables; +class CalleeSavedInfo; +class SDNode; +class SelectionDAG; + +template<class T> class SmallVectorImpl; + + +//--------------------------------------------------------------------------- +/// +/// TargetInstrInfo - Interface to description of machine instruction set +/// +class TargetInstrInfo { + const TargetInstrDesc *Descriptors; // Raw array to allow static init'n + unsigned NumOpcodes; // Number of entries in the desc array + + TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT + void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT +public: + TargetInstrInfo(const TargetInstrDesc *desc, unsigned NumOpcodes); + virtual ~TargetInstrInfo(); + + // Invariant opcodes: All instruction sets have these as their low opcodes. + enum { + PHI = 0, + INLINEASM = 1, + DBG_LABEL = 2, + EH_LABEL = 3, + GC_LABEL = 4, + DECLARE = 5, + + /// EXTRACT_SUBREG - This instruction takes two operands: a register + /// that has subregisters, and a subregister index. It returns the + /// extracted subregister value. This is commonly used to implement + /// truncation operations on target architectures which support it. + EXTRACT_SUBREG = 6, + + /// INSERT_SUBREG - This instruction takes three operands: a register + /// that has subregisters, a register providing an insert value, and a + /// subregister index. It returns the value of the first register with + /// the value of the second register inserted. 
The first register is + /// often defined by an IMPLICIT_DEF, as is commonly used to implement + /// anyext operations on target architectures which support it. + INSERT_SUBREG = 7, + + /// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef. + IMPLICIT_DEF = 8, + + /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except + /// that the first operand is an immediate integer constant. This constant + /// is often zero, as is commonly used to implement zext operations on + /// target architectures which support it, such as with x86-64 (with + /// zext from i32 to i64 via implicit zero-extension). + SUBREG_TO_REG = 9, + + /// COPY_TO_REGCLASS - This instruction is a placeholder for a plain + /// register-to-register copy into a specific register class. This is only + /// used between instruction selection and MachineInstr creation, before + /// virtual registers have been created for all the instructions, and it's + /// only needed in cases where the register classes implied by the + /// instructions are insufficient. The actual MachineInstrs to perform + /// the copy are emitted with the TargetInstrInfo::copyRegToReg hook. + COPY_TO_REGCLASS = 10 + }; + + unsigned getNumOpcodes() const { return NumOpcodes; } + + /// get - Return the machine instruction descriptor that corresponds to the + /// specified instruction opcode. + /// + const TargetInstrDesc &get(unsigned Opcode) const { + assert(Opcode < NumOpcodes && "Invalid opcode!"); + return Descriptors[Opcode]; + } + + /// isTriviallyReMaterializable - Return true if the instruction is trivially + /// rematerializable, meaning it has no side effects and requires no operands + /// that aren't always available. + bool isTriviallyReMaterializable(const MachineInstr *MI) const { + return MI->getDesc().isRematerializable() && + isReallyTriviallyReMaterializable(MI); + } + +protected: + /// isReallyTriviallyReMaterializable - For instructions with opcodes for + /// which the M_REMATERIALIZABLE flag is set, this function tests whether the + /// instruction itself is actually trivially rematerializable, considering + /// its operands. This is used for targets that have instructions that are + /// only trivially rematerializable for specific uses. This predicate must + /// return false if the instruction has any side effects other than + /// producing a value, or if it requres any address registers that are not + /// always available. + virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI) const { + return true; + } + +public: + /// Return true if the instruction is a register to register move and return + /// the source and dest operands and their sub-register indices by reference. + virtual bool isMoveInstr(const MachineInstr& MI, + unsigned& SrcReg, unsigned& DstReg, + unsigned& SrcSubIdx, unsigned& DstSubIdx) const { + return false; + } + + /// isLoadFromStackSlot - If the specified machine instruction is a direct + /// load from a stack slot, return the virtual or physical register number of + /// the destination along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than loading from the stack slot. 
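+  /// A target override typically pattern-matches one or two load opcodes, for
+  /// example (an illustrative sketch; MYTGT::LDRri is a made-up opcode whose
+  /// destination is operand 0 and whose address is a frame index in operand 1
+  /// plus an immediate offset in operand 2):
+  ///
+  ///   unsigned MyTargetInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+  ///                                                   int &FrameIndex) const {
+  ///     if (MI->getOpcode() == MYTGT::LDRri &&
+  ///         MI->getOperand(1).isFI() &&
+  ///         MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
+  ///       FrameIndex = MI->getOperand(1).getIndex();
+  ///       return MI->getOperand(0).getReg();
+  ///     }
+  ///     return 0;
+  ///   }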
+  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+                                       int &FrameIndex) const {
+    return 0;
+  }
+
+  /// isStoreToStackSlot - If the specified machine instruction is a direct
+  /// store to a stack slot, return the virtual or physical register number of
+  /// the source reg along with the FrameIndex of the stack slot being stored
+  /// to.  If not, return 0.  This predicate must return 0 if the instruction
+  /// has any side effects other than storing to the stack slot.
+  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+                                      int &FrameIndex) const {
+    return 0;
+  }
+
+  /// reMaterialize - Re-issue the specified 'original' instruction at the
+  /// specific location targeting a new destination register.
+  virtual void reMaterialize(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator MI,
+                             unsigned DestReg,
+                             const MachineInstr *Orig) const = 0;
+
+  /// isInvariantLoad - Return true if the specified instruction (which is
+  /// marked mayLoad) is loading from a location whose value is invariant across
+  /// the function.  For example, loading a value from the constant pool or
+  /// from the argument area of a function if it does not change.  This should
+  /// only return true if *all* loads the instruction does are invariant (if it
+  /// does multiple loads).
+  virtual bool isInvariantLoad(const MachineInstr *MI) const {
+    return false;
+  }
+
+  /// convertToThreeAddress - This method must be implemented by targets that
+  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
+  /// may be able to convert a two-address instruction into one or more true
+  /// three-address instructions on demand.  This allows the X86 target (for
+  /// example) to convert ADD and SHL instructions into LEA instructions if they
+  /// would require register copies due to two-addressness.
+  ///
+  /// This method returns a null pointer if the transformation cannot be
+  /// performed, otherwise it returns the last new instruction.
+  ///
+  virtual MachineInstr *
+  convertToThreeAddress(MachineFunction::iterator &MFI,
+                   MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
+    return 0;
+  }
+
+  /// commuteInstruction - If a target has any instructions that are commutable,
+  /// but require converting to a different instruction or making non-trivial
+  /// changes to commute them, this method can be overloaded to do this.  The
+  /// default implementation of this method simply swaps the first two operands
+  /// of MI and returns it.
+  ///
+  /// If a target wants to make more aggressive changes, they can construct and
+  /// return a new machine instruction.  If an instruction cannot commute, it
+  /// can also return null.
+  ///
+  /// If NewMI is true, then a new machine instruction must be created.
+  ///
+  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
+                                           bool NewMI = false) const = 0;
+
+  /// CommuteChangesDestination - Return true if commuting the specified
+  /// instruction also changes the destination operand. Also return the current
+  /// operand index of the would-be new destination register by reference. This
+  /// can happen when the commutable instruction is also a two-address
+  /// instruction.
+  virtual bool CommuteChangesDestination(MachineInstr *MI,
+                                         unsigned &OpIdx) const = 0;
+
+  /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
+  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
+  /// implemented for a target).
+  /// Upon success, this returns false and returns with the following
+  /// information in various cases:
+  ///
+  /// 1. If this block ends with no branches (it just falls through to its succ)
+  ///    just return false, leaving TBB/FBB null.
+  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
+  ///    the destination block.
+  /// 3. If this block ends with a conditional branch and it falls through to a
+  ///    successor block, it sets TBB to be the branch destination block and
+  ///    returns a list of operands that evaluate the condition.  These
+  ///    operands can be passed to other TargetInstrInfo methods to create new
+  ///    branches.
+  /// 4. If this block ends with a conditional branch followed by an
+  ///    unconditional branch, it returns the 'true' destination in TBB, the
+  ///    'false' destination in FBB, and a list of operands that evaluate the
+  ///    condition.  These operands can be passed to other TargetInstrInfo
+  ///    methods to create new branches.
+  ///
+  /// Note that RemoveBranch and InsertBranch must be implemented to support
+  /// cases where this method returns success.
+  ///
+  /// If AllowModify is true, then this routine is allowed to modify the basic
+  /// block (e.g. delete instructions after the unconditional branch).
+  ///
+  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                             MachineBasicBlock *&FBB,
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             bool AllowModify = false) const {
+    return true;
+  }
+
+  /// RemoveBranch - Remove the branching code at the end of the specific MBB.
+  /// This is only invoked in cases where AnalyzeBranch returns success. It
+  /// returns the number of instructions that were removed.
+  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
+    return 0;
+  }
+
+  /// InsertBranch - Insert a branch into the end of the specified
+  /// MachineBasicBlock.  The operands to this method are the same as those
+  /// returned by AnalyzeBranch.  This is invoked in cases where AnalyzeBranch
+  /// returns success and when an unconditional branch (TBB is non-null, FBB is
+  /// null, Cond is empty) needs to be inserted. It returns the number of
+  /// instructions inserted.
+  ///
+  /// It is also invoked by tail merging to add unconditional branches in
+  /// cases where AnalyzeBranch doesn't apply because there was no original
+  /// branch to analyze.  At least this much must be implemented, else tail
+  /// merging needs to be disabled.
+  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                                MachineBasicBlock *FBB,
+                            const SmallVectorImpl<MachineOperand> &Cond) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
+    return 0;
+  }
+
+  /// copyRegToReg - Emit instructions to copy between a pair of registers. It
+  /// returns false if the target does not know how to copy between the
+  /// specified registers.
+  virtual bool copyRegToReg(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI,
+                            unsigned DestReg, unsigned SrcReg,
+                            const TargetRegisterClass *DestRC,
+                            const TargetRegisterClass *SrcRC) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::copyRegToReg!");
+    return false;
+  }
+
+  /// storeRegToStackSlot - Store the specified register of the given register
+  /// class to the specified stack frame index.  The store instruction is to be
+  /// added to the given machine basic block before the specified machine
+  /// instruction.
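+  /// A typical override emits a single target-specific store through the frame
+  /// index, roughly like this (an illustrative sketch; MYTGT::STRrfi is a
+  /// made-up opcode, and a real implementation would also propagate the isKill
+  /// flag onto the source-register operand):
+  ///
+  ///   void MyTargetInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+  ///                                          MachineBasicBlock::iterator MI,
+  ///                                          unsigned SrcReg, bool isKill,
+  ///                                          int FrameIndex,
+  ///                                          const TargetRegisterClass *RC) const {
+  ///     BuildMI(MBB, MI, get(MYTGT::STRrfi))
+  ///       .addReg(SrcReg)              // value to store
+  ///       .addFrameIndex(FrameIndex)   // stack slot
+  ///       .addImm(0);                  // offset within the slot
+  ///   }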
+  /// If isKill is true, the register operand is the last use and must be
+  /// marked kill.
+  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI,
+                                   unsigned SrcReg, bool isKill, int FrameIndex,
+                                   const TargetRegisterClass *RC) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
+  }
+
+  /// storeRegToAddr - Store the specified register of the given register class
+  /// to the specified address. The store instruction is to be added to the
+  /// given machine basic block before the specified machine instruction. If
+  /// isKill is true, the register operand is the last use and must be marked
+  /// kill.
+  virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+                              SmallVectorImpl<MachineOperand> &Addr,
+                              const TargetRegisterClass *RC,
+                              SmallVectorImpl<MachineInstr*> &NewMIs) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::storeRegToAddr!");
+  }
+
+  /// loadRegFromStackSlot - Load the specified register of the given register
+  /// class from the specified stack frame index. The load instruction is to be
+  /// added to the given machine basic block before the specified machine
+  /// instruction.
+  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator MI,
+                                    unsigned DestReg, int FrameIndex,
+                                    const TargetRegisterClass *RC) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
+  }
+
+  /// loadRegFromAddr - Load the specified register of the given register class
+  /// from the specified address. The load instruction is to be added to
+  /// the given machine basic block before the specified machine instruction.
+  virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+                               SmallVectorImpl<MachineOperand> &Addr,
+                               const TargetRegisterClass *RC,
+                               SmallVectorImpl<MachineInstr*> &NewMIs) const {
+    assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromAddr!");
+  }
+
+  /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
+  /// saved registers and returns true if it isn't possible / profitable to do
+  /// so by issuing a series of store instructions via
+  /// storeRegToStackSlot(). Returns false otherwise.
+  virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                         MachineBasicBlock::iterator MI,
+                                const std::vector<CalleeSavedInfo> &CSI) const {
+    return false;
+  }
+
+  /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
+  /// saved registers and returns true if it isn't possible / profitable to do
+  /// so by issuing a series of load instructions via loadRegFromStackSlot().
+  /// Returns false otherwise.
+  virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MI,
+                                const std::vector<CalleeSavedInfo> &CSI) const {
+    return false;
+  }
+
+  /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
+  /// slot into the specified machine instruction for the specified operand(s).
+  /// If this is possible, a new instruction is returned with the specified
+  /// operand folded, otherwise NULL is returned. The client is responsible for
+  /// removing the old instruction and adding the new one in the instruction
+  /// stream.
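+  /// A caller typically uses it along these lines (an illustrative sketch;
+  /// TII, MI, OpNum and FrameIndex are assumed to be in scope):
+  ///
+  ///   SmallVector<unsigned, 1> Ops;
+  ///   Ops.push_back(OpNum);                      // operand to fold
+  ///   if (MachineInstr *NewMI =
+  ///         TII->foldMemoryOperand(MF, MI, Ops, FrameIndex)) {
+  ///     MachineBasicBlock::iterator InsertPt(MI);
+  ///     MI->getParent()->insert(InsertPt, NewMI); // add the folded instruction
+  ///     MI->eraseFromParent();                    // and remove the original
+  ///   }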
+ MachineInstr* foldMemoryOperand(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + int FrameIndex) const; + + /// foldMemoryOperand - Same as the previous version except it allows folding + /// of any load and store from / to any address, not just from a specific + /// stack slot. + MachineInstr* foldMemoryOperand(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + MachineInstr* LoadMI) const; + +protected: + /// foldMemoryOperandImpl - Target-dependent implementation for + /// foldMemoryOperand. Target-independent code in foldMemoryOperand will + /// take care of adding a MachineMemOperand to the newly created instruction. + virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + int FrameIndex) const { + return 0; + } + + /// foldMemoryOperandImpl - Target-dependent implementation for + /// foldMemoryOperand. Target-independent code in foldMemoryOperand will + /// take care of adding a MachineMemOperand to the newly created instruction. + virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr* MI, + const SmallVectorImpl<unsigned> &Ops, + MachineInstr* LoadMI) const { + return 0; + } + +public: + /// canFoldMemoryOperand - Returns true for the specified load / store if + /// folding is possible. + virtual + bool canFoldMemoryOperand(const MachineInstr *MI, + const SmallVectorImpl<unsigned> &Ops) const { + return false; + } + + /// unfoldMemoryOperand - Separate a single instruction which folded a load or + /// a store or a load and a store into two or more instruction. If this is + /// possible, returns true as well as the new instructions by reference. + virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, + unsigned Reg, bool UnfoldLoad, bool UnfoldStore, + SmallVectorImpl<MachineInstr*> &NewMIs) const{ + return false; + } + + virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, + SmallVectorImpl<SDNode*> &NewNodes) const { + return false; + } + + /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new + /// instruction after load / store are unfolded from an instruction of the + /// specified opcode. It returns zero if the specified unfolding is not + /// possible. + virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, + bool UnfoldLoad, bool UnfoldStore) const { + return 0; + } + + /// BlockHasNoFallThrough - Return true if the specified block does not + /// fall-through into its successor block. This is primarily used when a + /// branch is unanalyzable. It is useful for things like unconditional + /// indirect branches (jump tables). + virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const { + return false; + } + + /// ReverseBranchCondition - Reverses the branch condition of the specified + /// condition list, returning false on success and true if it cannot be + /// reversed. + virtual + bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { + return true; + } + + /// insertNoop - Insert a noop into the instruction stream at the specified + /// point. + virtual void insertNoop(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const { + assert(0 && "Target didn't implement insertNoop!"); + abort(); + } + + /// isPredicated - Returns true if the instruction is already predicated. 
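+  /// Clients such as if-conversion typically guard predication attempts with
+  /// this, roughly (an illustrative sketch; Cond is a predicate operand list,
+  /// for example the one produced by AnalyzeBranch):
+  ///
+  ///   if (!TII->isPredicated(MI) && MI->getDesc().isPredicable())
+  ///     TII->PredicateInstruction(MI, Cond);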
+ /// + virtual bool isPredicated(const MachineInstr *MI) const { + return false; + } + + /// isUnpredicatedTerminator - Returns true if the instruction is a + /// terminator instruction that has not been predicated. + virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const; + + /// PredicateInstruction - Convert the instruction into a predicated + /// instruction. It returns true if the operation was successful. + virtual + bool PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const = 0; + + /// SubsumesPredicate - Returns true if the first specified predicate + /// subsumes the second, e.g. GE subsumes GT. + virtual + bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, + const SmallVectorImpl<MachineOperand> &Pred2) const { + return false; + } + + /// DefinesPredicate - If the specified instruction defines any predicate + /// or condition code register(s) used for predication, returns true as well + /// as the definition predicate(s) by reference. + virtual bool DefinesPredicate(MachineInstr *MI, + std::vector<MachineOperand> &Pred) const { + return false; + } + + /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine + /// instruction that defines the specified register class. + virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { + return true; + } + + /// GetInstSize - Returns the size of the specified Instruction. + /// + virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const { + assert(0 && "Target didn't implement TargetInstrInfo::GetInstSize!"); + return 0; + } + + /// GetFunctionSizeInBytes - Returns the size of the specified MachineFunction. + /// + virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const = 0; +}; + +/// TargetInstrInfoImpl - This is the default implementation of +/// TargetInstrInfo, which just provides a couple of default implementations +/// for various methods. This separated out because it is implemented in +/// libcodegen, not in libtarget. +class TargetInstrInfoImpl : public TargetInstrInfo { +protected: + TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes) + : TargetInstrInfo(desc, NumOpcodes) {} +public: + virtual MachineInstr *commuteInstruction(MachineInstr *MI, + bool NewMI = false) const; + virtual bool CommuteChangesDestination(MachineInstr *MI, + unsigned &OpIdx) const; + virtual bool PredicateInstruction(MachineInstr *MI, + const SmallVectorImpl<MachineOperand> &Pred) const; + virtual void reMaterialize(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, + const MachineInstr *Orig) const; + virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const; +}; + +/// getInstrOperandRegClass - Return register class of the operand of an +/// instruction of the specified TargetInstrDesc. +const TargetRegisterClass* +getInstrOperandRegClass(const TargetRegisterInfo *TRI, + const TargetInstrDesc &II, unsigned Op); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetInstrItineraries.h b/include/llvm/Target/TargetInstrItineraries.h new file mode 100644 index 0000000..18931ea --- /dev/null +++ b/include/llvm/Target/TargetInstrItineraries.h @@ -0,0 +1,99 @@ +//===-- llvm/Target/TargetInstrItineraries.h - Scheduling -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file describes the structures used for instruction itineraries and +// states. This is used by schedulers to determine instruction states and +// latencies. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINSTRITINERARIES_H +#define LLVM_TARGET_TARGETINSTRITINERARIES_H + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// Instruction stage - These values represent a step in the execution of an +/// instruction. The latency represents the number of discrete time slots used +/// need to complete the stage. Units represent the choice of functional units +/// that can be used to complete the stage. Eg. IntUnit1, IntUnit2. +/// +struct InstrStage { + unsigned Cycles; ///< Length of stage in machine cycles + unsigned Units; ///< Choice of functional units +}; + + +//===----------------------------------------------------------------------===// +/// Instruction itinerary - An itinerary represents a sequential series of steps +/// required to complete an instruction. Itineraries are represented as +/// sequences of instruction stages. +/// +struct InstrItinerary { + unsigned First; ///< Index of first stage in itinerary + unsigned Last; ///< Index of last + 1 stage in itinerary +}; + + + +//===----------------------------------------------------------------------===// +/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be +/// used by a target. +/// +struct InstrItineraryData { + const InstrStage *Stages; ///< Array of stages selected + const InstrItinerary *Itineratries; ///< Array of itineraries selected + + /// Ctors. + /// + InstrItineraryData() : Stages(0), Itineratries(0) {} + InstrItineraryData(const InstrStage *S, const InstrItinerary *I) + : Stages(S), Itineratries(I) {} + + /// isEmpty - Returns true if there are no itineraries. + /// + bool isEmpty() const { return Itineratries == 0; } + + /// begin - Return the first stage of the itinerary. + /// + const InstrStage *begin(unsigned ItinClassIndx) const { + unsigned StageIdx = Itineratries[ItinClassIndx].First; + return Stages + StageIdx; + } + + /// end - Return the last+1 stage of the itinerary. + /// + const InstrStage *end(unsigned ItinClassIndx) const { + unsigned StageIdx = Itineratries[ItinClassIndx].Last; + return Stages + StageIdx; + } + + /// getLatency - Return the scheduling latency of the given class. A + /// simple latency value for an instruction is an over-simplification + /// for some architectures, but it's a reasonable first approximation. + /// + unsigned getLatency(unsigned ItinClassIndx) const { + // If the target doesn't provide latency information, use a simple + // non-zero default value for all instructions. + if (isEmpty()) + return 1; + + // Just sum the cycle count for each stage. 
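+    // For example (illustrative only): an itinerary whose stages take
+    // 2, 1 and 3 cycles yields a latency of 2 + 1 + 3 = 6 cycles.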
+ unsigned Latency = 0; + for (const InstrStage *IS = begin(ItinClassIndx), *E = end(ItinClassIndx); + IS != E; ++IS) + Latency += IS->Cycles; + return Latency; + } +}; + + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetIntrinsicInfo.h b/include/llvm/Target/TargetIntrinsicInfo.h new file mode 100644 index 0000000..c14275f --- /dev/null +++ b/include/llvm/Target/TargetIntrinsicInfo.h @@ -0,0 +1,61 @@ +//===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the target intrinsic instructions to the code generator. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H +#define LLVM_TARGET_TARGETINTRINSICINFO_H + +namespace llvm { + +class Function; +class Module; +class Type; + +//--------------------------------------------------------------------------- +/// +/// TargetIntrinsicInfo - Interface to description of machine instruction set +/// +class TargetIntrinsicInfo { + + const char **Intrinsics; // Raw array to allow static init'n + unsigned NumIntrinsics; // Number of entries in the desc array + + TargetIntrinsicInfo(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT + void operator=(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT +public: + TargetIntrinsicInfo(const char **desc, unsigned num); + virtual ~TargetIntrinsicInfo(); + + unsigned getNumIntrinsics() const { return NumIntrinsics; } + + virtual Function *getDeclaration(Module *M, const char *BuiltinName) const { + return 0; + } + + // Returns the Function declaration for intrinsic BuiltinName. If the + // intrinsic can be overloaded, uses Tys to return the correct function. + virtual Function *getDeclaration(Module *M, const char *BuiltinName, + const Type **Tys, unsigned numTys) const { + return 0; + } + + // Returns true if the Builtin can be overloaded. + virtual bool isOverloaded(Module *M, const char *BuiltinName) const { + return false; + } + + virtual unsigned getIntrinsicID(Function *F) const { return 0; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetJITInfo.h b/include/llvm/Target/TargetJITInfo.h new file mode 100644 index 0000000..9545689 --- /dev/null +++ b/include/llvm/Target/TargetJITInfo.h @@ -0,0 +1,135 @@ +//===- Target/TargetJITInfo.h - Target Information for JIT ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes an abstract interface used by the Just-In-Time code +// generator to perform target-specific activities, such as emitting stubs. If +// a TargetMachine supports JIT code generation, it should provide one of these +// objects through the getJITInfo() method. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETJITINFO_H +#define LLVM_TARGET_TARGETJITINFO_H + +#include <cassert> +#include "llvm/Support/DataTypes.h" + +namespace llvm { + class Function; + class GlobalValue; + class JITCodeEmitter; + class MachineRelocation; + + /// TargetJITInfo - Target specific information required by the Just-In-Time + /// code generator. + class TargetJITInfo { + public: + virtual ~TargetJITInfo() {} + + /// replaceMachineCodeForFunction - Make it so that calling the function + /// whose machine code is at OLD turns into a call to NEW, perhaps by + /// overwriting OLD with a branch to NEW. This is used for self-modifying + /// code. + /// + virtual void replaceMachineCodeForFunction(void *Old, void *New) = 0; + + /// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object + /// to emit an indirect symbol which contains the address of the specified + /// ptr. + virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr, + JITCodeEmitter &JCE) { + assert(0 && "This target doesn't implement emitGlobalValueIndirectSym!"); + return 0; + } + + /// emitFunctionStub - Use the specified JITCodeEmitter object to emit a + /// small native function that simply calls the function at the specified + /// address. Return the address of the resultant function. + virtual void *emitFunctionStub(const Function* F, void *Fn, + JITCodeEmitter &JCE) { + assert(0 && "This target doesn't implement emitFunctionStub!"); + return 0; + } + + /// emitFunctionStubAtAddr - Use the specified JITCodeEmitter object to + /// emit a small native function that simply calls Fn. Emit the stub into + /// the supplied buffer. + virtual void emitFunctionStubAtAddr(const Function* F, void *Fn, + void *Buffer, JITCodeEmitter &JCE) { + assert(0 && "This target doesn't implement emitFunctionStubAtAddr!"); + } + + /// getPICJumpTableEntry - Returns the value of the jumptable entry for the + /// specific basic block. + virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) { + assert(0 && "This target doesn't implement getPICJumpTableEntry!"); + return 0; + } + + /// LazyResolverFn - This typedef is used to represent the function that + /// unresolved call points should invoke. This is a target specific + /// function that knows how to walk the stack and find out which stub the + /// call is coming from. + typedef void (*LazyResolverFn)(); + + /// JITCompilerFn - This typedef is used to represent the JIT function that + /// lazily compiles the function corresponding to a stub. The JIT keeps + /// track of the mapping between stubs and LLVM Functions, the target + /// provides the ability to figure out the address of a stub that is called + /// by the LazyResolverFn. + typedef void* (*JITCompilerFn)(void *); + + /// getLazyResolverFunction - This method is used to initialize the JIT, + /// giving the target the function that should be used to compile a + /// function, and giving the JIT the target function used to do the lazy + /// resolving. + virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn) { + assert(0 && "Not implemented for this target!"); + return 0; + } + + /// relocate - Before the JIT can run a block of code that has been emitted, + /// it must rewrite the code to contain the actual addresses of any + /// referenced global symbols. 
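+    ///
+    /// A per-target override typically walks the NumRelocs entries starting
+    /// at MR and patches the emitted bytes of Function in place. Minimal
+    /// sketch only (assumes a simple absolute-word relocation; the accessor
+    /// names are assumed from MachineRelocation):
+    ///   for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
+    ///     void *Addr = (char*)Function + MR->getMachineCodeOffset();
+    ///     *(intptr_t*)Addr = (intptr_t)MR->getResultPointer();
+    ///   }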
+ virtual void relocate(void *Function, MachineRelocation *MR, + unsigned NumRelocs, unsigned char* GOTBase) { + assert(NumRelocs == 0 && "This target does not have relocations!"); + } + + + /// allocateThreadLocalMemory - Each target has its own way of + /// handling thread local variables. This method returns a value only + /// meaningful to the target. + virtual char* allocateThreadLocalMemory(size_t size) { + assert(0 && "This target does not implement thread local storage!"); + return 0; + } + + /// needsGOT - Allows a target to specify that it would like the + /// JIT to manage a GOT for it. + bool needsGOT() const { return useGOT; } + + /// hasCustomConstantPool - Allows a target to specify that constant + /// pool address resolution is handled by the target. + virtual bool hasCustomConstantPool() const { return false; } + + /// hasCustomJumpTables - Allows a target to specify that jumptables + /// are emitted by the target. + virtual bool hasCustomJumpTables() const { return false; } + + /// allocateSeparateGVMemory - If true, globals should be placed in + /// separately allocated heap memory rather than in the same + /// code memory allocated by JITCodeEmitter. + virtual bool allocateSeparateGVMemory() const { return false; } + protected: + bool useGOT; + }; +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h new file mode 100644 index 0000000..163f4c5 --- /dev/null +++ b/include/llvm/Target/TargetLowering.h @@ -0,0 +1,1676 @@ +//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes how to lower LLVM code to machine code. This has two +// main components: +// +// 1. Which ValueTypes are natively supported by the target. +// 2. Which operations are supported for supported ValueTypes. +// 3. Cost thresholds for alternative implementations of certain operations. +// +// In addition it has a few other components, like information about FP +// immediates. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETLOWERING_H +#define LLVM_TARGET_TARGETLOWERING_H + +#include "llvm/InlineAsm.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/CodeGen/RuntimeLibcalls.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/DebugLoc.h" +#include "llvm/Target/TargetMachine.h" +#include <climits> +#include <map> +#include <vector> + +namespace llvm { + class AllocaInst; + class CallInst; + class Function; + class FastISel; + class MachineBasicBlock; + class MachineFunction; + class MachineFrameInfo; + class MachineInstr; + class MachineModuleInfo; + class DwarfWriter; + class SDNode; + class SDValue; + class SelectionDAG; + class TargetData; + class TargetMachine; + class TargetRegisterClass; + class TargetSubtarget; + class Value; + + // FIXME: should this be here? 
+ namespace TLSModel { + enum Model { + GeneralDynamic, + LocalDynamic, + InitialExec, + LocalExec + }; + } + TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc); + + +//===----------------------------------------------------------------------===// +/// TargetLowering - This class defines information used to lower LLVM code to +/// legal SelectionDAG operators that the target instruction selector can accept +/// natively. +/// +/// This class also defines callbacks that targets must implement to lower +/// target-specific constructs to SelectionDAG operators. +/// +class TargetLowering { +public: + /// LegalizeAction - This enum indicates whether operations are valid for a + /// target, and if not, what action should be used to make them valid. + enum LegalizeAction { + Legal, // The target natively supports this operation. + Promote, // This operation should be executed in a larger type. + Expand, // Try to expand this to other ops, otherwise use a libcall. + Custom // Use the LowerOperation hook to implement custom lowering. + }; + + enum OutOfRangeShiftAmount { + Undefined, // Oversized shift amounts are undefined (default). + Mask, // Shift amounts are auto masked (anded) to value size. + Extend // Oversized shift pulls in zeros or sign bits. + }; + + enum BooleanContent { // How the target represents true/false values. + UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. + ZeroOrOneBooleanContent, // All bits zero except for bit 0. + ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. + }; + + enum SchedPreference { + SchedulingForLatency, // Scheduling for shortest total latency. + SchedulingForRegPressure // Scheduling for lowest register pressure. + }; + + explicit TargetLowering(TargetMachine &TM); + virtual ~TargetLowering(); + + TargetMachine &getTargetMachine() const { return TM; } + const TargetData *getTargetData() const { return TD; } + + bool isBigEndian() const { return !IsLittleEndian; } + bool isLittleEndian() const { return IsLittleEndian; } + MVT getPointerTy() const { return PointerTy; } + MVT getShiftAmountTy() const { return ShiftAmountTy; } + OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; } + + /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC + /// codegen. + bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; } + + /// isSelectExpensive - Return true if the select operation is expensive for + /// this target. + bool isSelectExpensive() const { return SelectIsExpensive; } + + /// isIntDivCheap() - Return true if integer divide is usually cheaper than + /// a sequence of several shifts, adds, and multiplies for this target. + bool isIntDivCheap() const { return IntDivIsCheap; } + + /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of + /// srl/add/sra. + bool isPow2DivCheap() const { return Pow2DivIsCheap; } + + /// getSetCCResultType - Return the ValueType of the result of SETCC + /// operations. Also used to obtain the target's preferred type for + /// the condition operand of SELECT and BRCOND nodes. In the case of + /// BRCOND the argument passed is MVT::Other since there are no other + /// operands to get a type hint from. + virtual MVT getSetCCResultType(MVT VT) const; + + /// getBooleanContents - For targets without i1 registers, this gives the + /// nature of the high-bits of boolean values held in types wider than i1. 
+ /// "Boolean values" are special true/false values produced by nodes like + /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND. + /// Not to be confused with general values promoted from i1. + BooleanContent getBooleanContents() const { return BooleanContents;} + + /// getSchedulingPreference - Return target scheduling preference. + SchedPreference getSchedulingPreference() const { + return SchedPreferenceInfo; + } + + /// getRegClassFor - Return the register class that should be used for the + /// specified value type. This may only be called on legal types. + TargetRegisterClass *getRegClassFor(MVT VT) const { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()]; + assert(RC && "This value type is not natively supported!"); + return RC; + } + + /// isTypeLegal - Return true if the target has native support for the + /// specified value type. This means that it has a register that directly + /// holds it without promotions or expansions. + bool isTypeLegal(MVT VT) const { + assert(!VT.isSimple() || + (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0; + } + + class ValueTypeActionImpl { + /// ValueTypeActions - This is a bitvector that contains two bits for each + /// value type, where the two bits correspond to the LegalizeAction enum. + /// This can be queried with "getTypeAction(VT)". + uint32_t ValueTypeActions[2]; + public: + ValueTypeActionImpl() { + ValueTypeActions[0] = ValueTypeActions[1] = 0; + } + ValueTypeActionImpl(const ValueTypeActionImpl &RHS) { + ValueTypeActions[0] = RHS.ValueTypeActions[0]; + ValueTypeActions[1] = RHS.ValueTypeActions[1]; + } + + LegalizeAction getTypeAction(MVT VT) const { + if (VT.isExtended()) { + if (VT.isVector()) { + return VT.isPow2VectorType() ? Expand : Promote; + } + if (VT.isInteger()) + // First promote to a power-of-two size, then expand if necessary. + return VT == VT.getRoundIntegerType() ? Expand : Promote; + assert(0 && "Unsupported extended type!"); + return Legal; + } + unsigned I = VT.getSimpleVT(); + assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); + return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3); + } + void setTypeAction(MVT VT, LegalizeAction Action) { + unsigned I = VT.getSimpleVT(); + assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); + ValueTypeActions[I>>4] |= Action << ((I*2) & 31); + } + }; + + const ValueTypeActionImpl &getValueTypeActions() const { + return ValueTypeActions; + } + + /// getTypeAction - Return how we should legalize values of this type, either + /// it is already legal (return 'Legal') or we need to promote it to a larger + /// type (return 'Promote'), or we need to expand it into multiple registers + /// of smaller integer type (return 'Expand'). 'Custom' is not an option. + LegalizeAction getTypeAction(MVT VT) const { + return ValueTypeActions.getTypeAction(VT); + } + + /// getTypeToTransformTo - For types supported by the target, this is an + /// identity function. For types that must be promoted to larger types, this + /// returns the larger type to promote to. For integer types that are larger + /// than the largest integer register, this contains one step in the expansion + /// to get to the smaller register. For illegal floating point types, this + /// returns the integer type to transform to. 
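+  ///
+  /// Illustrative usage sketch (not part of the original interface; 'TLI' is
+  /// an assumed reference to the target's TargetLowering): a client can walk
+  /// an illegal type down to a legal one by applying this one step at a time:
+  ///   MVT VT = MVT::i128;                   // illegal on most targets
+  ///   while (TLI.getTypeAction(VT) != TargetLowering::Legal)
+  ///     VT = TLI.getTypeToTransformTo(VT);  // e.g. i128 -> i64 -> i32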
+ MVT getTypeToTransformTo(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType)); + MVT NVT = TransformToType[VT.getSimpleVT()]; + assert(getTypeAction(NVT) != Promote && + "Promote may not follow Expand or Promote"); + return NVT; + } + + if (VT.isVector()) { + MVT NVT = VT.getPow2VectorType(); + if (NVT == VT) { + // Vector length is a power of 2 - split to half the size. + unsigned NumElts = VT.getVectorNumElements(); + MVT EltVT = VT.getVectorElementType(); + return (NumElts == 1) ? EltVT : MVT::getVectorVT(EltVT, NumElts / 2); + } + // Promote to a power of two size, avoiding multi-step promotion. + return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; + } else if (VT.isInteger()) { + MVT NVT = VT.getRoundIntegerType(); + if (NVT == VT) + // Size is a power of two - expand to half the size. + return MVT::getIntegerVT(VT.getSizeInBits() / 2); + else + // Promote to a power of two size, avoiding multi-step promotion. + return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; + } + assert(0 && "Unsupported extended type!"); + return MVT(); // Not reached + } + + /// getTypeToExpandTo - For types supported by the target, this is an + /// identity function. For types that must be expanded (i.e. integer types + /// that are larger than the largest integer register or illegal floating + /// point types), this returns the largest legal type it will be expanded to. + MVT getTypeToExpandTo(MVT VT) const { + assert(!VT.isVector()); + while (true) { + switch (getTypeAction(VT)) { + case Legal: + return VT; + case Expand: + VT = getTypeToTransformTo(VT); + break; + default: + assert(false && "Type is not legal nor is it to be expanded!"); + return VT; + } + } + return VT; + } + + /// getVectorTypeBreakdown - Vector types are broken down into some number of + /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 + /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. + /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. + /// + /// This method returns the number of registers needed, and the VT for each + /// register. It also returns the VT and quantity of the intermediate values + /// before they are promoted/expanded. + /// + unsigned getVectorTypeBreakdown(MVT VT, + MVT &IntermediateVT, + unsigned &NumIntermediates, + MVT &RegisterVT) const; + + /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the + /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If + /// this is the case, it returns true and store the intrinsic + /// information into the IntrinsicInfo that was passed to the function. + typedef struct IntrinsicInfo { + unsigned opc; // target opcode + MVT memVT; // memory VT + const Value* ptrVal; // value representing memory location + int offset; // offset off of ptrVal + unsigned align; // alignment + bool vol; // is volatile? + bool readMem; // reads memory? + bool writeMem; // writes memory? + } IntrinisicInfo; + + virtual bool getTgtMemIntrinsic(IntrinsicInfo& Info, + CallInst &I, unsigned Intrinsic) { + return false; + } + + /// getWidenVectorType: given a vector type, returns the type to widen to + /// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself. + /// If there is no vector type that we want to widen to, returns MVT::Other + /// When and were to widen is target dependent based on the cost of + /// scalarizing vs using the wider vector type. 
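+  ///
+  /// For instance (illustrative), a target with 128-bit vector registers
+  /// might widen MVT::v3i32 to MVT::v4i32, while returning MVT::Other for an
+  /// oddly sized type it would rather scalarize.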
+ virtual MVT getWidenVectorType(MVT VT) const; + + typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator; + legal_fpimm_iterator legal_fpimm_begin() const { + return LegalFPImmediates.begin(); + } + legal_fpimm_iterator legal_fpimm_end() const { + return LegalFPImmediates.end(); + } + + /// isShuffleMaskLegal - Targets can use this to indicate that they only + /// support *some* VECTOR_SHUFFLE operations, those with specific masks. + /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values + /// are assumed to be legal. + virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, + MVT VT) const { + return true; + } + + /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is + /// used by Targets can use this to indicate if there is a suitable + /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant + /// pool entry. + virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, + MVT VT) const { + return false; + } + + /// getOperationAction - Return how this operation should be treated: either + /// it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction getOperationAction(unsigned Op, MVT VT) const { + if (VT.isExtended()) return Expand; + assert(Op < array_lengthof(OpActions) && + (unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((OpActions[Op] >> (2*VT.getSimpleVT())) & 3); + } + + /// isOperationLegalOrCustom - Return true if the specified operation is + /// legal on this target or can be made legal with custom lowering. This + /// is used to help guide high-level lowering decisions. + bool isOperationLegalOrCustom(unsigned Op, MVT VT) const { + return (VT == MVT::Other || isTypeLegal(VT)) && + (getOperationAction(Op, VT) == Legal || + getOperationAction(Op, VT) == Custom); + } + + /// isOperationLegal - Return true if the specified operation is legal on this + /// target. + bool isOperationLegal(unsigned Op, MVT VT) const { + return (VT == MVT::Other || isTypeLegal(VT)) && + getOperationAction(Op, VT) == Legal; + } + + /// getLoadExtAction - Return how this load with extension should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction getLoadExtAction(unsigned LType, MVT VT) const { + assert(LType < array_lengthof(LoadExtActions) && + (unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3); + } + + /// isLoadExtLegal - Return true if the specified load with extension is legal + /// on this target. + bool isLoadExtLegal(unsigned LType, MVT VT) const { + return VT.isSimple() && + (getLoadExtAction(LType, VT) == Legal || + getLoadExtAction(LType, VT) == Custom); + } + + /// getTruncStoreAction - Return how this store with truncation should be + /// treated: either it is legal, needs to be promoted to a larger size, needs + /// to be expanded to some other code sequence, or the target has a custom + /// expander for it. 
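+  ///
+  /// For example (illustrative), a target that cannot store an f64 value as
+  /// a truncated f32 would have registered
+  ///   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  /// in its constructor, so getTruncStoreAction(MVT::f64, MVT::f32) returns
+  /// Expand here.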
+ LegalizeAction getTruncStoreAction(MVT ValVT, + MVT MemVT) const { + assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && + (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >> + (2*MemVT.getSimpleVT())) & 3); + } + + /// isTruncStoreLegal - Return true if the specified store with truncation is + /// legal on this target. + bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const { + return isTypeLegal(ValVT) && MemVT.isSimple() && + (getTruncStoreAction(ValVT, MemVT) == Legal || + getTruncStoreAction(ValVT, MemVT) == Custom); + } + + /// getIndexedLoadAction - Return how the indexed load should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction + getIndexedLoadAction(unsigned IdxMode, MVT VT) const { + assert(IdxMode < array_lengthof(IndexedModeActions[0]) && + (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0][0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> + (2*VT.getSimpleVT())) & 3); + } + + /// isIndexedLoadLegal - Return true if the specified indexed load is legal + /// on this target. + bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const { + return VT.isSimple() && + (getIndexedLoadAction(IdxMode, VT) == Legal || + getIndexedLoadAction(IdxMode, VT) == Custom); + } + + /// getIndexedStoreAction - Return how the indexed store should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction + getIndexedStoreAction(unsigned IdxMode, MVT VT) const { + assert(IdxMode < array_lengthof(IndexedModeActions[1]) && + (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> + (2*VT.getSimpleVT())) & 3); + } + + /// isIndexedStoreLegal - Return true if the specified indexed load is legal + /// on this target. + bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const { + return VT.isSimple() && + (getIndexedStoreAction(IdxMode, VT) == Legal || + getIndexedStoreAction(IdxMode, VT) == Custom); + } + + /// getConvertAction - Return how the conversion should be treated: + /// either it is legal, needs to be promoted to a larger size, needs to be + /// expanded to some other code sequence, or the target has a custom expander + /// for it. + LegalizeAction + getConvertAction(MVT FromVT, MVT ToVT) const { + assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + "Table isn't big enough!"); + return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >> + (2*ToVT.getSimpleVT())) & 3); + } + + /// isConvertLegal - Return true if the specified conversion is legal + /// on this target. + bool isConvertLegal(MVT FromVT, MVT ToVT) const { + return isTypeLegal(FromVT) && isTypeLegal(ToVT) && + (getConvertAction(FromVT, ToVT) == Legal || + getConvertAction(FromVT, ToVT) == Custom); + } + + /// getCondCodeAction - Return how the condition code should be treated: + /// either it is legal, needs to be expanded to some other code sequence, + /// or the target has a custom expander for it. 
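+  ///
+  /// For example (illustrative), a target without a native unordered
+  /// floating-point compare could register
+  ///   setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+  /// and legalization would then rewrite such setcc nodes into a sequence of
+  /// supported comparisons.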
+ LegalizeAction + getCondCodeAction(ISD::CondCode CC, MVT VT) const { + assert((unsigned)CC < array_lengthof(CondCodeActions) && + (unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && + "Table isn't big enough!"); + LegalizeAction Action = (LegalizeAction) + ((CondCodeActions[CC] >> (2*VT.getSimpleVT())) & 3); + assert(Action != Promote && "Can't promote condition code!"); + return Action; + } + + /// isCondCodeLegal - Return true if the specified condition code is legal + /// on this target. + bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { + return getCondCodeAction(CC, VT) == Legal || + getCondCodeAction(CC, VT) == Custom; + } + + + /// getTypeToPromoteTo - If the action for this operation is to promote, this + /// method returns the ValueType to promote to. + MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { + assert(getOperationAction(Op, VT) == Promote && + "This operation isn't promoted!"); + + // See if this has an explicit type specified. + std::map<std::pair<unsigned, MVT::SimpleValueType>, + MVT::SimpleValueType>::const_iterator PTTI = + PromoteToType.find(std::make_pair(Op, VT.getSimpleVT())); + if (PTTI != PromoteToType.end()) return PTTI->second; + + assert((VT.isInteger() || VT.isFloatingPoint()) && + "Cannot autopromote this type, add it with AddPromotedToType."); + + MVT NVT = VT; + do { + NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1); + assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && + "Didn't find type to promote to!"); + } while (!isTypeLegal(NVT) || + getOperationAction(Op, NVT) == Promote); + return NVT; + } + + /// getValueType - Return the MVT corresponding to this LLVM type. + /// This is fixed by the LLVM operations except for the pointer size. If + /// AllowUnknown is true, this will return MVT::Other for types with no MVT + /// counterpart (e.g. structs), otherwise it will assert. + MVT getValueType(const Type *Ty, bool AllowUnknown = false) const { + MVT VT = MVT::getMVT(Ty, AllowUnknown); + return VT == MVT::iPTR ? PointerTy : VT; + } + + /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate + /// function arguments in the caller parameter area. This is the actual + /// alignment, not its logarithm. + virtual unsigned getByValTypeAlignment(const Type *Ty) const; + + /// getRegisterType - Return the type of registers that this ValueType will + /// eventually require. + MVT getRegisterType(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT)); + return RegisterTypeForVT[VT.getSimpleVT()]; + } + if (VT.isVector()) { + MVT VT1, RegisterVT; + unsigned NumIntermediates; + (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT); + return RegisterVT; + } + if (VT.isInteger()) { + return getRegisterType(getTypeToTransformTo(VT)); + } + assert(0 && "Unsupported extended type!"); + return MVT(); // Not reached + } + + /// getNumRegisters - Return the number of registers that this ValueType will + /// eventually require. This is one for any types promoted to live in larger + /// registers, but may be more than one for types (like i64) that are split + /// into pieces. For types like i140, which are first promoted then expanded, + /// it is the number of registers needed to hold all the bits of the original + /// type. For an i140 on a 32 bit machine this means 5 registers. 
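+  /// (Concretely, the implementation below rounds up: with a bit width of
+  /// 140 and a 32-bit register type, (140 + 32 - 1) / 32 = 5.)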
+ unsigned getNumRegisters(MVT VT) const { + if (VT.isSimple()) { + assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT)); + return NumRegistersForVT[VT.getSimpleVT()]; + } + if (VT.isVector()) { + MVT VT1, VT2; + unsigned NumIntermediates; + return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2); + } + if (VT.isInteger()) { + unsigned BitWidth = VT.getSizeInBits(); + unsigned RegWidth = getRegisterType(VT).getSizeInBits(); + return (BitWidth + RegWidth - 1) / RegWidth; + } + assert(0 && "Unsupported extended type!"); + return 0; // Not reached + } + + /// ShouldShrinkFPConstant - If true, then instruction selection should + /// seek to shrink the FP constant of the specified type to a smaller type + /// in order to save space and / or reduce runtime. + virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; } + + /// hasTargetDAGCombine - If true, the target has custom DAG combine + /// transformations that it can perform for the specified node. + bool hasTargetDAGCombine(ISD::NodeType NT) const { + assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); + return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); + } + + /// This function returns the maximum number of store operations permitted + /// to replace a call to llvm.memset. The value is set by the target at the + /// performance threshold for such a replacement. + /// @brief Get maximum # of store operations permitted for llvm.memset + unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; } + + /// This function returns the maximum number of store operations permitted + /// to replace a call to llvm.memcpy. The value is set by the target at the + /// performance threshold for such a replacement. + /// @brief Get maximum # of store operations permitted for llvm.memcpy + unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; } + + /// This function returns the maximum number of store operations permitted + /// to replace a call to llvm.memmove. The value is set by the target at the + /// performance threshold for such a replacement. + /// @brief Get maximum # of store operations permitted for llvm.memmove + unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; } + + /// This function returns true if the target allows unaligned memory accesses. + /// This is used, for example, in situations where an array copy/move/set is + /// converted to a sequence of store operations. It's use helps to ensure that + /// such replacements don't generate code that causes an alignment error + /// (trap) on the target machine. + /// @brief Determine if the target supports unaligned memory accesses. + bool allowsUnalignedMemoryAccesses() const { + return allowUnalignedMemoryAccesses; + } + + /// This function returns true if the target would benefit from code placement + /// optimization. + /// @brief Determine if the target should perform code placement optimization. + bool shouldOptimizeCodePlacement() const { + return benefitFromCodePlacementOpt; + } + + /// getOptimalMemOpType - Returns the target specific optimal type for load + /// and store operations as a result of memset, memcpy, and memmove lowering. + /// It returns MVT::iAny if SelectionDAG should be responsible for + /// determining it. + virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align, + bool isSrcConst, bool isSrcStr) const { + return MVT::iAny; + } + + /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp + /// to implement llvm.setjmp. 
+ bool usesUnderscoreSetJmp() const { + return UseUnderscoreSetJmp; + } + + /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp + /// to implement llvm.longjmp. + bool usesUnderscoreLongJmp() const { + return UseUnderscoreLongJmp; + } + + /// getStackPointerRegisterToSaveRestore - If a physical register, this + /// specifies the register that llvm.savestack/llvm.restorestack should save + /// and restore. + unsigned getStackPointerRegisterToSaveRestore() const { + return StackPointerRegisterToSaveRestore; + } + + /// getExceptionAddressRegister - If a physical register, this returns + /// the register that receives the exception address on entry to a landing + /// pad. + unsigned getExceptionAddressRegister() const { + return ExceptionPointerRegister; + } + + /// getExceptionSelectorRegister - If a physical register, this returns + /// the register that receives the exception typeid on entry to a landing + /// pad. + unsigned getExceptionSelectorRegister() const { + return ExceptionSelectorRegister; + } + + /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never + /// set, the default is 200) + unsigned getJumpBufSize() const { + return JumpBufSize; + } + + /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes + /// (if never set, the default is 0) + unsigned getJumpBufAlignment() const { + return JumpBufAlignment; + } + + /// getIfCvtBlockLimit - returns the target specific if-conversion block size + /// limit. Any block whose size is greater should not be predicated. + unsigned getIfCvtBlockSizeLimit() const { + return IfCvtBlockSizeLimit; + } + + /// getIfCvtDupBlockLimit - returns the target specific size limit for a + /// block to be considered for duplication. Any block whose size is greater + /// should not be duplicated to facilitate its predication. + unsigned getIfCvtDupBlockSizeLimit() const { + return IfCvtDupBlockSizeLimit; + } + + /// getPrefLoopAlignment - return the preferred loop alignment. + /// + unsigned getPrefLoopAlignment() const { + return PrefLoopAlignment; + } + + /// getPreIndexedAddressParts - returns true by value, base pointer and + /// offset pointer and addressing mode by reference if the node's address + /// can be legally represented as pre-indexed load / store address. + virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + return false; + } + + /// getPostIndexedAddressParts - returns true by value, base pointer and + /// offset pointer and addressing mode by reference if this node can be + /// combined with a load / store to form a post-indexed load / store. + virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, + SDValue &Base, SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + return false; + } + + /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC + /// jumptable. + virtual SDValue getPICJumpTableRelocBase(SDValue Table, + SelectionDAG &DAG) const; + + /// isOffsetFoldingLegal - Return true if folding a constant offset + /// with the given GlobalAddress is legal. It is frequently not legal in + /// PIC relocation models. 
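+  ///
+  /// For example (illustrative), under a GOT-based PIC model the address of
+  /// a global is loaded from its GOT entry, so an access to 'GV + 8' must be
+  /// materialized as a separate add rather than folded into the relocation.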
+ virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; + + //===--------------------------------------------------------------------===// + // TargetLowering Optimization Methods + // + + /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two + /// SDValues for returning information from TargetLowering to its clients + /// that want to combine + struct TargetLoweringOpt { + SelectionDAG &DAG; + SDValue Old; + SDValue New; + + explicit TargetLoweringOpt(SelectionDAG &InDAG) : DAG(InDAG) {} + + bool CombineTo(SDValue O, SDValue N) { + Old = O; + New = N; + return true; + } + + /// ShrinkDemandedConstant - Check to see if the specified operand of the + /// specified instruction is a constant integer. If so, check to see if + /// there are any bits set in the constant that are not demanded. If so, + /// shrink the constant and return true. + bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded); + + /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the + /// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening + /// cast, but it could be generalized for targets with other types of + /// implicit widening casts. + bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, + DebugLoc dl); + }; + + /// SimplifyDemandedBits - Look at Op. At this point, we know that only the + /// DemandedMask bits of the result of Op are ever used downstream. If we can + /// use this information to simplify Op, create a new simplified DAG node and + /// return true, returning the original and new nodes in Old and New. + /// Otherwise, analyze the expression and return a mask of KnownOne and + /// KnownZero bits for the expression (used to simplify the caller). + /// The KnownZero/One bits may only be accurate for those bits in the + /// DemandedMask. + bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, + APInt &KnownZero, APInt &KnownOne, + TargetLoweringOpt &TLO, unsigned Depth = 0) const; + + /// computeMaskedBitsForTargetNode - Determine which of the bits specified in + /// Mask are known to be either zero or one and return them in the + /// KnownZero/KnownOne bitsets. + virtual void computeMaskedBitsForTargetNode(const SDValue Op, + const APInt &Mask, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth = 0) const; + + /// ComputeNumSignBitsForTargetNode - This method can be implemented by + /// targets that want to expose additional information about sign bits to the + /// DAG Combiner. + virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, + unsigned Depth = 0) const; + + struct DAGCombinerInfo { + void *DC; // The DAG Combiner object. + bool BeforeLegalize; + bool CalledByLegalizer; + public: + SelectionDAG &DAG; + + DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc) + : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {} + + bool isBeforeLegalize() const { return BeforeLegalize; } + bool isCalledByLegalizer() const { return CalledByLegalizer; } + + void AddToWorklist(SDNode *N); + SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To, + bool AddTo = true); + SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true); + SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true); + + void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO); + }; + + /// SimplifySetCC - Try to simplify a setcc built with the specified operands + /// and cc. If it is unable to simplify it, return a null SDValue. 
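+  ///
+  /// For example (illustrative), a setcc whose operands are both constants
+  /// folds to a constant boolean, and 'setcc X, 0, setult' folds to false
+  /// since no value is unsigned-less-than zero.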
+ SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1, + ISD::CondCode Cond, bool foldBooleans, + DAGCombinerInfo &DCI, DebugLoc dl) const; + + /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the + /// node is a GlobalAddress + offset. + virtual bool + isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const; + + /// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is + /// loading 'Bytes' bytes from a location that is 'Dist' units away from the + /// location that the 'Base' load is loading from. + bool isConsecutiveLoad(SDNode *LD, SDNode *Base, unsigned Bytes, int Dist, + const MachineFrameInfo *MFI) const; + + /// PerformDAGCombine - This method will be invoked for all target nodes and + /// for any target-independent nodes that the target has registered with + /// invoke it for. + /// + /// The semantics are as follows: + /// Return Value: + /// SDValue.Val == 0 - No change was made + /// SDValue.Val == N - N was replaced, is dead, and is already handled. + /// otherwise - N should be replaced by the returned Operand. + /// + /// In addition, methods provided by DAGCombinerInfo may be used to perform + /// more complex transformations. + /// + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + + //===--------------------------------------------------------------------===// + // TargetLowering Configuration Methods - These methods should be invoked by + // the derived class constructor to configure this object for the target. + // + +protected: + /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a + /// GOT for PC-relative code. + void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; } + + /// setShiftAmountType - Describe the type that should be used for shift + /// amounts. This type defaults to the pointer type. + void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; } + + /// setBooleanContents - Specify how the target extends the result of a + /// boolean value from i1 to a wider type. See getBooleanContents. + void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; } + + /// setSchedulingPreference - Specify the target scheduling preference. + void setSchedulingPreference(SchedPreference Pref) { + SchedPreferenceInfo = Pref; + } + + /// setShiftAmountFlavor - Describe how the target handles out of range shift + /// amounts. + void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) { + ShiftAmtHandling = OORSA; + } + + /// setUseUnderscoreSetJmp - Indicate whether this target prefers to + /// use _setjmp to implement llvm.setjmp or the non _ version. + /// Defaults to false. + void setUseUnderscoreSetJmp(bool Val) { + UseUnderscoreSetJmp = Val; + } + + /// setUseUnderscoreLongJmp - Indicate whether this target prefers to + /// use _longjmp to implement llvm.longjmp or the non _ version. + /// Defaults to false. + void setUseUnderscoreLongJmp(bool Val) { + UseUnderscoreLongJmp = Val; + } + + /// setStackPointerRegisterToSaveRestore - If set to a physical register, this + /// specifies the register that llvm.savestack/llvm.restorestack should save + /// and restore. + void setStackPointerRegisterToSaveRestore(unsigned R) { + StackPointerRegisterToSaveRestore = R; + } + + /// setExceptionPointerRegister - If set to a physical register, this sets + /// the register that receives the exception address on entry to a landing + /// pad. 
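+  ///
+  /// These configuration hooks are normally invoked from a target's
+  /// TargetLowering constructor. Minimal sketch (illustrative; the register
+  /// names are placeholders for whatever the target defines):
+  ///   setStackPointerRegisterToSaveRestore(MyTarget::SP);
+  ///   setExceptionPointerRegister(MyTarget::R0);
+  ///   setExceptionSelectorRegister(MyTarget::R1);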
+  void setExceptionPointerRegister(unsigned R) {
+    ExceptionPointerRegister = R;
+  }
+
+  /// setExceptionSelectorRegister - If set to a physical register, this sets
+  /// the register that receives the exception typeid on entry to a landing
+  /// pad.
+  void setExceptionSelectorRegister(unsigned R) {
+    ExceptionSelectorRegister = R;
+  }
+
+  /// setSelectIsExpensive - Tells the code generator not to expand operations
+  /// into sequences that use the select operation if possible.
+  void setSelectIsExpensive() { SelectIsExpensive = true; }
+
+  /// setIntDivIsCheap - Tells the code generator whether integer divide is
+  /// cheaper than a sequence of shifts, adds, and multiplies on this target;
+  /// when it is not, divides should be replaced by such a sequence where
+  /// possible.
+  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
+
+  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
+  /// srl/add/sra for a signed divide by a power of two, and should instead
+  /// let the target handle it.
+  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
+
+  /// addRegisterClass - Add the specified register class as an available
+  /// regclass for the specified value type. This indicates the selector can
+  /// handle values of that class natively.
+  void addRegisterClass(MVT VT, TargetRegisterClass *RC) {
+    assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
+    AvailableRegClasses.push_back(std::make_pair(VT, RC));
+    RegClassForVT[VT.getSimpleVT()] = RC;
+  }
+
+  /// computeRegisterProperties - Once all of the register classes are added,
+  /// this allows us to compute derived properties we expose.
+  void computeRegisterProperties();
+
+  /// setOperationAction - Indicate that the specified operation does not work
+  /// with the specified type and indicate what to do about it.
+  void setOperationAction(unsigned Op, MVT VT,
+                          LegalizeAction Action) {
+    assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 &&
+           Op < array_lengthof(OpActions) && "Table isn't big enough!");
+    OpActions[Op] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
+    OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2;
+  }
+
+  /// setLoadExtAction - Indicate that the specified load with extension does
+  /// not work with the specified type and indicate what to do about it.
+  void setLoadExtAction(unsigned ExtType, MVT VT,
+                        LegalizeAction Action) {
+    assert((unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 &&
+           ExtType < array_lengthof(LoadExtActions) &&
+           "Table isn't big enough!");
+    LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
+    LoadExtActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2;
+  }
+
+  /// setTruncStoreAction - Indicate that the specified truncating store does
+  /// not work with the specified type and indicate what to do about it.
+  void setTruncStoreAction(MVT ValVT, MVT MemVT,
+                           LegalizeAction Action) {
+    assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) &&
+           (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 &&
+           "Table isn't big enough!");
+    TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) <<
+                                                MemVT.getSimpleVT()*2);
+    TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action <<
+      MemVT.getSimpleVT()*2;
+  }
+
+  /// setIndexedLoadAction - Indicate that the specified indexed load does or
+  /// does not work with the specified type and indicate what to do about
+  /// it.
NOTE: All indexed mode loads are initialized to Expand in + /// TargetLowering.cpp + void setIndexedLoadAction(unsigned IdxMode, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0])*4 && + IdxMode < array_lengthof(IndexedModeActions[0]) && + "Table isn't big enough!"); + IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// setIndexedStoreAction - Indicate that the specified indexed store does or + /// does not work with the with specified type and indicate what to do about + /// it. NOTE: All indexed mode stores are initialized to Expand in + /// TargetLowering.cpp + void setIndexedStoreAction(unsigned IdxMode, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && + IdxMode < array_lengthof(IndexedModeActions[1]) && + "Table isn't big enough!"); + IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// setConvertAction - Indicate that the specified conversion does or does + /// not work with the with specified type and indicate what to do about it. + void setConvertAction(MVT FromVT, MVT ToVT, + LegalizeAction Action) { + assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + "Table isn't big enough!"); + ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) << + ToVT.getSimpleVT()*2); + ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action << + ToVT.getSimpleVT()*2; + } + + /// setCondCodeAction - Indicate that the specified condition code is or isn't + /// supported on the target and indicate what to do about it. + void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action) { + assert((unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && + (unsigned)CC < array_lengthof(CondCodeActions) && + "Table isn't big enough!"); + CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); + CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.getSimpleVT()*2; + } + + /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the + /// promotion code defaults to trying a larger integer/fp until it can find + /// one that works. If that default is insufficient, this method can be used + /// by the target to override the default. + void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { + PromoteToType[std::make_pair(Opc, OrigVT.getSimpleVT())] = + DestVT.getSimpleVT(); + } + + /// addLegalFPImmediate - Indicate that this target can instruction select + /// the specified FP immediate natively. + void addLegalFPImmediate(const APFloat& Imm) { + LegalFPImmediates.push_back(Imm); + } + + /// setTargetDAGCombine - Targets should invoke this method for each target + /// independent node that they want to provide a custom DAG combiner for by + /// implementing the PerformDAGCombine virtual method. 
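+  ///
+  /// For example (illustrative), a target that wants to look at every SHL
+  /// node would call setTargetDAGCombine(ISD::SHL) in its constructor and
+  /// match that opcode in its PerformDAGCombine override ('MyTargetLowering'
+  /// is a placeholder name):
+  ///   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
+  ///                                               DAGCombinerInfo &DCI) const {
+  ///     if (N->getOpcode() == ISD::SHL) {
+  ///       // ... attempt a target-specific rewrite here ...
+  ///     }
+  ///     return SDValue();   // no change
+  ///   }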
+ void setTargetDAGCombine(ISD::NodeType NT) { + assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); + TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7); + } + + /// setJumpBufSize - Set the target's required jmp_buf buffer size (in + /// bytes); default is 200 + void setJumpBufSize(unsigned Size) { + JumpBufSize = Size; + } + + /// setJumpBufAlignment - Set the target's required jmp_buf buffer + /// alignment (in bytes); default is 0 + void setJumpBufAlignment(unsigned Align) { + JumpBufAlignment = Align; + } + + /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size + /// limit (in number of instructions); default is 2. + void setIfCvtBlockSizeLimit(unsigned Limit) { + IfCvtBlockSizeLimit = Limit; + } + + /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number + /// of instructions) to be considered for code duplication during + /// if-conversion; default is 2. + void setIfCvtDupBlockSizeLimit(unsigned Limit) { + IfCvtDupBlockSizeLimit = Limit; + } + + /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default + /// alignment is zero, it means the target does not care about loop alignment. + void setPrefLoopAlignment(unsigned Align) { + PrefLoopAlignment = Align; + } + +public: + + virtual const TargetSubtarget *getSubtarget() { + assert(0 && "Not Implemented"); + return NULL; // this is here to silence compiler errors + } + //===--------------------------------------------------------------------===// + // Lowering methods - These methods must be implemented by targets so that + // the SelectionDAGLowering code knows how to lower these. + // + + /// LowerArguments - This hook must be implemented to indicate how we should + /// lower the arguments for the specified function, into the specified DAG. + virtual void + LowerArguments(Function &F, SelectionDAG &DAG, + SmallVectorImpl<SDValue>& ArgValues, DebugLoc dl); + + /// LowerCallTo - This hook lowers an abstract call to a function into an + /// actual call. This returns a pair of operands. The first element is the + /// return value for the function (if RetTy is not VoidTy). The second + /// element is the outgoing token chain. + struct ArgListEntry { + SDValue Node; + const Type* Ty; + bool isSExt : 1; + bool isZExt : 1; + bool isInReg : 1; + bool isSRet : 1; + bool isNest : 1; + bool isByVal : 1; + uint16_t Alignment; + + ArgListEntry() : isSExt(false), isZExt(false), isInReg(false), + isSRet(false), isNest(false), isByVal(false), Alignment(0) { } + }; + typedef std::vector<ArgListEntry> ArgListTy; + virtual std::pair<SDValue, SDValue> + LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, + bool isVarArg, bool isInreg, unsigned CallingConv, + bool isTailCall, SDValue Callee, ArgListTy &Args, + SelectionDAG &DAG, DebugLoc dl); + + /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a + /// memcpy. This can be used by targets to provide code sequences for cases + /// that don't fit the target's parameters for simple loads/stores and can be + /// more efficient than using a library call. This function can return a null + /// SDValue if the target declines to use custom code and a different + /// lowering strategy should be used. + /// + /// If AlwaysInline is true, the size is constant and the target should not + /// emit any calls and is strongly encouraged to attempt to emit inline code + /// even if it is beyond the usual threshold because this intrinsic is being + /// expanded in a place where calls are not feasible (e.g. 
within the prologue + /// for another call). If the target chooses to decline an AlwaysInline + /// request here, legalize will resort to using simple loads and stores. + virtual SDValue + EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, + SDValue Chain, + SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, + bool AlwaysInline, + const Value *DstSV, uint64_t DstOff, + const Value *SrcSV, uint64_t SrcOff) { + return SDValue(); + } + + /// EmitTargetCodeForMemmove - Emit target-specific code that performs a + /// memmove. This can be used by targets to provide code sequences for cases + /// that don't fit the target's parameters for simple loads/stores and can be + /// more efficient than using a library call. This function can return a null + /// SDValue if the target declines to use custom code and a different + /// lowering strategy should be used. + virtual SDValue + EmitTargetCodeForMemmove(SelectionDAG &DAG, DebugLoc dl, + SDValue Chain, + SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, + const Value *DstSV, uint64_t DstOff, + const Value *SrcSV, uint64_t SrcOff) { + return SDValue(); + } + + /// EmitTargetCodeForMemset - Emit target-specific code that performs a + /// memset. This can be used by targets to provide code sequences for cases + /// that don't fit the target's parameters for simple stores and can be more + /// efficient than using a library call. This function can return a null + /// SDValue if the target declines to use custom code and a different + /// lowering strategy should be used. + virtual SDValue + EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, + SDValue Chain, + SDValue Op1, SDValue Op2, + SDValue Op3, unsigned Align, + const Value *DstSV, uint64_t DstOff) { + return SDValue(); + } + + /// LowerOperationWrapper - This callback is invoked by the type legalizer + /// to legalize nodes with an illegal operand type but legal result types. + /// It replaces the LowerOperation callback in the type Legalizer. + /// The reason we can not do away with LowerOperation entirely is that + /// LegalizeDAG isn't yet ready to use this callback. + /// TODO: Consider merging with ReplaceNodeResults. + + /// The target places new result values for the node in Results (their number + /// and types must exactly match those of the original return values of + /// the node), or leaves Results empty, which indicates that the node is not + /// to be custom lowered after all. + /// The default implementation calls LowerOperation. + virtual void LowerOperationWrapper(SDNode *N, + SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG); + + /// LowerOperation - This callback is invoked for operations that are + /// unsupported by the target, which are registered to use 'custom' lowering, + /// and whose defined values are all legal. + /// If the target has no operations that require custom lowering, it need not + /// implement this. The default implementation of this aborts. + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); + + /// ReplaceNodeResults - This callback is invoked when a node result type is + /// illegal for the target, and the operation was registered to use 'custom' + /// lowering for that result type. The target places new result values for + /// the node in Results (their number and types must exactly match those of + /// the original return values of the node), or leaves Results empty, which + /// indicates that the node is not to be custom lowered after all. 
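/*
  A minimal sketch of the custom-lowering hook described above. FooTargetLowering
  and the two helper functions are hypothetical; the point is only the shape of
  the override: operations that the constructor registered with
  setOperationAction(..., Custom) arrive here as whole nodes, and the target
  returns the replacement value.

    SDValue FooTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) {
      switch (Op.getOpcode()) {
      case ISD::SELECT_CC:
        return LowerSELECT_CC(Op, DAG);      // hypothetical helper
      case ISD::GlobalAddress:
        return LowerGlobalAddress(Op, DAG);  // hypothetical helper
      default:
        assert(0 && "Opcode was registered Custom but is not handled here");
        return SDValue();
      }
    }
*/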
+ /// + /// If the target has no operations that require custom lowering, it need not + /// implement this. The default implementation aborts. + virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, + SelectionDAG &DAG) { + assert(0 && "ReplaceNodeResults not implemented for this target!"); + } + + /// IsEligibleForTailCallOptimization - Check whether the call is eligible for + /// tail call optimization. Targets which want to do tail call optimization + /// should override this function. + virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call, + SDValue Ret, + SelectionDAG &DAG) const { + return false; + } + + /// CheckTailCallReturnConstraints - Check whether the CALL node immediately + /// precedes the RET node and whether the return uses the result of the node + /// or is a void return. This function can be used by the target to determine + /// eligibility of tail call optimization. + static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret); + + /// GetPossiblePreceedingTailCall - Get the preceding TailCallNodeOpCode node if + /// it exists. Skip a possible ISD::TokenFactor. + static SDValue GetPossiblePreceedingTailCall(SDValue Chain, + unsigned TailCallNodeOpCode) { + if (Chain.getOpcode() == TailCallNodeOpCode) { + return Chain; + } else if (Chain.getOpcode() == ISD::TokenFactor) { + if (Chain.getNumOperands() && + Chain.getOperand(0).getOpcode() == TailCallNodeOpCode) + return Chain.getOperand(0); + } + return Chain; + } + + /// getTargetNodeName() - This method returns the name of a target specific + /// DAG node. + virtual const char *getTargetNodeName(unsigned Opcode) const; + + /// createFastISel - This method returns a target specific FastISel object, + /// or null if the target does not support "fast" ISel. + virtual FastISel * + createFastISel(MachineFunction &, + MachineModuleInfo *, DwarfWriter *, + DenseMap<const Value *, unsigned> &, + DenseMap<const BasicBlock *, MachineBasicBlock *> &, + DenseMap<const AllocaInst *, int> & +#ifndef NDEBUG + , SmallSet<Instruction*, 8> &CatchInfoLost +#endif + ) { + return 0; + } + + //===--------------------------------------------------------------------===// + // Inline Asm Support hooks + // + + enum ConstraintType { + C_Register, // Constraint represents specific register(s). + C_RegisterClass, // Constraint represents any of register(s) in class. + C_Memory, // Memory constraint. + C_Other, // Something else. + C_Unknown // Unsupported constraint. + }; + + /// AsmOperandInfo - This contains information for each constraint that we are + /// lowering. + struct AsmOperandInfo : public InlineAsm::ConstraintInfo { + /// ConstraintCode - This contains the actual string for the code, like "m". + /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that + /// most closely matches the operand. + std::string ConstraintCode; + + /// ConstraintType - Information about the constraint code, e.g. Register, + /// RegisterClass, Memory, Other, Unknown. + TargetLowering::ConstraintType ConstraintType; + + /// CallOperandVal - If this is the result output operand or a + /// clobber, this is null, otherwise it is the incoming operand to the + /// CallInst. This gets modified as the asm is processed. + Value *CallOperandVal; + + /// ConstraintVT - The ValueType for the operand value. + MVT ConstraintVT; + + /// isMatchingInputConstraint - Return true if this is an input operand that + /// is a matching constraint like "4". 
+ bool isMatchingInputConstraint() const; + + /// getMatchedOperand - If this is an input matching constraint, this method + /// returns the output operand it matches. + unsigned getMatchedOperand() const; + + AsmOperandInfo(const InlineAsm::ConstraintInfo &info) + : InlineAsm::ConstraintInfo(info), + ConstraintType(TargetLowering::C_Unknown), + CallOperandVal(0), ConstraintVT(MVT::Other) { + } + }; + + /// ComputeConstraintToUse - Determines the constraint code and constraint + /// type to use for the specific AsmOperandInfo, setting + /// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand + /// being passed in is available, it can be passed in as Op, otherwise an + /// empty SDValue can be passed. If hasMemory is true it means one of the asm + /// constraint of the inline asm instruction being processed is 'm'. + virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, + SDValue Op, + bool hasMemory, + SelectionDAG *DAG = 0) const; + + /// getConstraintType - Given a constraint, return the type of constraint it + /// is for this target. + virtual ConstraintType getConstraintType(const std::string &Constraint) const; + + /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"), + /// return a list of registers that can be used to satisfy the constraint. + /// This should only be used for C_RegisterClass constraints. + virtual std::vector<unsigned> + getRegClassForInlineAsmConstraint(const std::string &Constraint, + MVT VT) const; + + /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g. + /// {edx}), return the register number and the register class for the + /// register. + /// + /// Given a register class constraint, like 'r', if this corresponds directly + /// to an LLVM register class, return a register of 0 and the register class + /// pointer. + /// + /// This should only be used for C_Register constraints. On error, + /// this returns a register number of 0 and a null register class pointer.. + virtual std::pair<unsigned, const TargetRegisterClass*> + getRegForInlineAsmConstraint(const std::string &Constraint, + MVT VT) const; + + /// LowerXConstraint - try to replace an X constraint, which matches anything, + /// with another that has more specific requirements based on the type of the + /// corresponding operand. This returns null if there is no replacement to + /// make. + virtual const char *LowerXConstraint(MVT ConstraintVT) const; + + /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops + /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true + /// it means one of the asm constraint of the inline asm instruction being + /// processed is 'm'. + virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, + bool hasMemory, + std::vector<SDValue> &Ops, + SelectionDAG &DAG) const; + + //===--------------------------------------------------------------------===// + // Scheduler hooks + // + + // EmitInstrWithCustomInserter - This method should be implemented by targets + // that mark instructions with the 'usesCustomDAGSchedInserter' flag. These + // instructions are special in various ways, which require special support to + // insert. The specified MachineInstr is created but not inserted into any + // basic blocks, and the scheduler passes ownership of it to this method. 
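/*
  For illustration, a target usually only needs to override getConstraintType and
  getRegForInlineAsmConstraint, both declared above. The constraint letter 'z',
  FooTargetLowering, Foo::R0 and Foo::GPRRegisterClass are all hypothetical names.

    TargetLowering::ConstraintType
    FooTargetLowering::getConstraintType(const std::string &Constraint) const {
      if (Constraint.size() == 1 && Constraint[0] == 'z')
        return C_Register;   // 'z' pins the operand to one specific register
      return TargetLowering::getConstraintType(Constraint);
    }

    std::pair<unsigned, const TargetRegisterClass*>
    FooTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                    MVT VT) const {
      if (Constraint == "z")
        return std::make_pair(Foo::R0, Foo::GPRRegisterClass);
      if (Constraint == "r")
        return std::make_pair(0U, Foo::GPRRegisterClass);
      return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
    }
*/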
+ virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *MBB) const; + + //===--------------------------------------------------------------------===// + // Addressing mode description hooks (used by LSR etc). + // + + /// AddrMode - This represents an addressing mode of: + /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + /// If BaseGV is null, there is no BaseGV. + /// If BaseOffs is zero, there is no base offset. + /// If HasBaseReg is false, there is no base register. + /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with + /// no scale. + /// + struct AddrMode { + GlobalValue *BaseGV; + int64_t BaseOffs; + bool HasBaseReg; + int64_t Scale; + AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {} + }; + + /// isLegalAddressingMode - Return true if the addressing mode represented by + /// AM is legal for this target, for a load/store of the specified type. + /// The type may be VoidTy, in which case only return true if the addressing + /// mode is legal for a load/store of any legal type. + /// TODO: Handle pre/postinc as well. + virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const; + + /// isTruncateFree - Return true if it's free to truncate a value of + /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in + /// register EAX to i16 by referencing its sub-register AX. + virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const { + return false; + } + + virtual bool isTruncateFree(MVT VT1, MVT VT2) const { + return false; + } + + /// isZExtFree - Return true if any actual instruction that defines a + /// value of type Ty1 implicit zero-extends the value to Ty2 in the result + /// register. This does not necessarily include registers defined in + /// unknown ways, such as incoming arguments, or copies from unknown + /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this + /// does not necessarily apply to truncate instructions. e.g. on x86-64, + /// all instructions that define 32-bit values implicit zero-extend the + /// result out to 64 bits. + virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const { + return false; + } + + virtual bool isZExtFree(MVT VT1, MVT VT2) const { + return false; + } + + /// isNarrowingProfitable - Return true if it's profitable to narrow + /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow + /// from i32 to i8 but not from i32 to i16. + virtual bool isNarrowingProfitable(MVT VT1, MVT VT2) const { + return false; + } + + //===--------------------------------------------------------------------===// + // Div utility functions + // + SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, + std::vector<SDNode*>* Created) const; + SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, + std::vector<SDNode*>* Created) const; + + + //===--------------------------------------------------------------------===// + // Runtime Library hooks + // + + /// setLibcallName - Rename the default libcall routine name for the specified + /// libcall. + void setLibcallName(RTLIB::Libcall Call, const char *Name) { + LibcallRoutineNames[Call] = Name; + } + + /// getLibcallName - Get the libcall routine name for the specified libcall. + /// + const char *getLibcallName(RTLIB::Libcall Call) const { + return LibcallRoutineNames[Call]; + } + + /// setCmpLibcallCC - Override the default CondCode to be used to test the + /// result of the comparison libcall against zero. 
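/*
  A sketch of isLegalAddressingMode (declared above) for a hypothetical target
  whose loads and stores accept [reg + 12-bit signed immediate] and no scaled
  index register. The limits are invented; a real target encodes its own
  addressing constraints here so that LSR does not build addresses the
  instruction selector cannot match.

    bool FooTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  const Type *Ty) const {
      if (AM.BaseGV)
        return false;                       // no global + register forms
      if (AM.BaseOffs < -2048 || AM.BaseOffs > 2047)
        return false;                       // offset must fit in 12 bits
      if (AM.Scale != 0 && AM.Scale != 1)
        return false;                       // no scaled index register
      return true;
    }
*/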
+ void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) { + CmpLibcallCCs[Call] = CC; + } + + /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of + /// the comparison libcall against zero. + ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const { + return CmpLibcallCCs[Call]; + } + +private: + TargetMachine &TM; + const TargetData *TD; + + /// PointerTy - The type to use for pointers, usually i32 or i64. + /// + MVT PointerTy; + + /// IsLittleEndian - True if this is a little endian target. + /// + bool IsLittleEndian; + + /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen. + /// + bool UsesGlobalOffsetTable; + + /// SelectIsExpensive - Tells the code generator not to expand operations + /// into sequences that use the select operations if possible. + bool SelectIsExpensive; + + /// IntDivIsCheap - Tells the code generator not to expand integer divides by + /// constants into a sequence of muls, adds, and shifts. This is a hack until + /// a real cost model is in place. If we ever optimize for size, this will be + /// set to true unconditionally. + bool IntDivIsCheap; + + /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate + /// srl/add/sra for a signed divide by power of two, and let the target handle + /// it. + bool Pow2DivIsCheap; + + /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement + /// llvm.setjmp. Defaults to false. + bool UseUnderscoreSetJmp; + + /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement + /// llvm.longjmp. Defaults to false. + bool UseUnderscoreLongJmp; + + /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever + /// PointerTy is. + MVT ShiftAmountTy; + + OutOfRangeShiftAmount ShiftAmtHandling; + + /// BooleanContents - Information about the contents of the high-bits in + /// boolean values held in a type wider than i1. See getBooleanContents. + BooleanContent BooleanContents; + + /// SchedPreferenceInfo - The target scheduling preference: shortest possible + /// total cycles or lowest register usage. + SchedPreference SchedPreferenceInfo; + + /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers + unsigned JumpBufSize; + + /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf + /// buffers + unsigned JumpBufAlignment; + + /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be + /// if-converted. + unsigned IfCvtBlockSizeLimit; + + /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be + /// duplicated during if-conversion. + unsigned IfCvtDupBlockSizeLimit; + + /// PrefLoopAlignment - The perferred loop alignment. + /// + unsigned PrefLoopAlignment; + + /// StackPointerRegisterToSaveRestore - If set to a physical register, this + /// specifies the register that llvm.savestack/llvm.restorestack should save + /// and restore. + unsigned StackPointerRegisterToSaveRestore; + + /// ExceptionPointerRegister - If set to a physical register, this specifies + /// the register that receives the exception address on entry to a landing + /// pad. + unsigned ExceptionPointerRegister; + + /// ExceptionSelectorRegister - If set to a physical register, this specifies + /// the register that receives the exception typeid on entry to a landing + /// pad. + unsigned ExceptionSelectorRegister; + + /// RegClassForVT - This indicates the default register class to use for + /// each ValueType the target supports natively. 
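/*
  Illustrative use of the runtime-library hooks above: a hypothetical soft-float
  target points the FP libcalls at its own runtime and describes how its
  comparison helper reports results. The routine names "__foo_fadd" and
  "__foo_fcmpeq" are invented for the example; the call pattern is what matters.

    // float addition becomes a call to __foo_fadd instead of the default name.
    setLibcallName(RTLIB::ADD_F32, "__foo_fadd");

    // __foo_fcmpeq returns nonzero when its operands compare equal, so the
    // generated code tests the call's result against zero with SETNE.
    setLibcallName(RTLIB::OEQ_F32, "__foo_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
*/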
+ TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; + unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; + MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; + + /// TransformToType - For any value types we are promoting or expanding, this + /// contains the value type that we are changing to. For Expanded types, this + /// contains one step of the expand (e.g. i64 -> i32), even if there are + /// multiple steps required (e.g. i64 -> i16). For types natively supported + /// by the system, this holds the same type (e.g. i32 -> i32). + MVT TransformToType[MVT::LAST_VALUETYPE]; + + /// OpActions - For each operation and each value type, keep a LegalizeAction + /// that indicates how instruction selection should deal with the operation. + /// Most operations are Legal (aka, supported natively by the target), but + /// operations that are not should be described. Note that operations on + /// non-legal value types are not described here. + uint64_t OpActions[ISD::BUILTIN_OP_END]; + + /// LoadExtActions - For each load of load extension type and each value type, + /// keep a LegalizeAction that indicates how instruction selection should deal + /// with the load. + uint64_t LoadExtActions[ISD::LAST_LOADEXT_TYPE]; + + /// TruncStoreActions - For each truncating store, keep a LegalizeAction that + /// indicates how instruction selection should deal with the store. + uint64_t TruncStoreActions[MVT::LAST_VALUETYPE]; + + /// IndexedModeActions - For each indexed mode and each value type, keep a + /// pair of LegalizeAction that indicates how instruction selection should + /// deal with the load / store. + uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE]; + + /// ConvertActions - For each conversion from source type to destination type, + /// keep a LegalizeAction that indicates how instruction selection should + /// deal with the conversion. + /// Currently, this is used only for floating->floating conversions + /// (FP_EXTEND and FP_ROUND). + uint64_t ConvertActions[MVT::LAST_VALUETYPE]; + + /// CondCodeActions - For each condition code (ISD::CondCode) keep a + /// LegalizeAction that indicates how instruction selection should + /// deal with the condition code. + uint64_t CondCodeActions[ISD::SETCC_INVALID]; + + ValueTypeActionImpl ValueTypeActions; + + std::vector<APFloat> LegalFPImmediates; + + std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses; + + /// TargetDAGCombineArray - Targets can specify ISD nodes that they would + /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), + /// which sets a bit in this array. + unsigned char + TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; + + /// PromoteToType - For operations that must be promoted to a specific type, + /// this holds the destination type. This map should be sparse, so don't hold + /// it as an array. + /// + /// Targets add entries to this map with AddPromotedToType(..), clients access + /// this with getTypeToPromoteTo(..). + std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> + PromoteToType; + + /// LibcallRoutineNames - Stores the name each libcall. + /// + const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL]; + + /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result + /// of each of the comparison libcall against zero. 
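/*
  The action tables above pack one 2-bit LegalizeAction per value type into each
  uint64_t entry, which is why the setters shift by VT*2 and mask with 3. A
  sketch of the decode side, mirroring what the getOperationAction-style queries
  earlier in this class do (decodeAction itself is not part of the interface):

    TargetLowering::LegalizeAction
    decodeAction(const uint64_t *Table, unsigned Index, MVT VT) {
      return (TargetLowering::LegalizeAction)
               ((Table[Index] >> (2 * VT.getSimpleVT())) & 3);
    }
*/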
+ ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; + +protected: + /// When lowering \@llvm.memset this field specifies the maximum number of + /// store operations that may be substituted for the call to memset. Targets + /// must set this value based on the cost threshold for that target. Targets + /// should assume that the memset will be done using as many of the largest + /// store operations first, followed by smaller ones, if necessary, per + /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine + /// with 16-bit alignment would result in four 2-byte stores and one 1-byte + /// store. This only applies to setting a constant array of a constant size. + /// @brief Specify maximum number of store instructions per memset call. + unsigned maxStoresPerMemset; + + /// When lowering \@llvm.memcpy this field specifies the maximum number of + /// store operations that may be substituted for a call to memcpy. Targets + /// must set this value based on the cost threshold for that target. Targets + /// should assume that the memcpy will be done using as many of the largest + /// store operations first, followed by smaller ones, if necessary, per + /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine + /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store + /// and one 1-byte store. This only applies to copying a constant array of + /// constant size. + /// @brief Specify maximum bytes of store instructions per memcpy call. + unsigned maxStoresPerMemcpy; + + /// When lowering \@llvm.memmove this field specifies the maximum number of + /// store instructions that may be substituted for a call to memmove. Targets + /// must set this value based on the cost threshold for that target. Targets + /// should assume that the memmove will be done using as many of the largest + /// store operations first, followed by smaller ones, if necessary, per + /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine + /// with 8-bit alignment would result in nine 1-byte stores. This only + /// applies to copying a constant array of constant size. + /// @brief Specify maximum bytes of store instructions per memmove call. + unsigned maxStoresPerMemmove; + + /// This field specifies whether the target machine permits unaligned memory + /// accesses. This is used, for example, to determine the size of store + /// operations when copying small arrays and other similar tasks. + /// @brief Indicate whether the target permits unaligned memory accesses. + bool allowUnalignedMemoryAccesses; + + /// This field specifies whether the target can benefit from code placement + /// optimization. + bool benefitFromCodePlacementOpt; +}; +} // end llvm namespace + +#endif diff --git a/include/llvm/Target/TargetMachOWriterInfo.h b/include/llvm/Target/TargetMachOWriterInfo.h new file mode 100644 index 0000000..f723bb5 --- /dev/null +++ b/include/llvm/Target/TargetMachOWriterInfo.h @@ -0,0 +1,112 @@ +//===-- llvm/Target/TargetMachOWriterInfo.h - MachO Writer Info--*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetMachOWriterInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETMACHOWRITERINFO_H +#define LLVM_TARGET_TARGETMACHOWRITERINFO_H + +#include "llvm/CodeGen/MachineRelocation.h" + +namespace llvm { + + class MachineBasicBlock; + class OutputBuffer; + + //===--------------------------------------------------------------------===// + // TargetMachOWriterInfo + //===--------------------------------------------------------------------===// + + class TargetMachOWriterInfo { + uint32_t CPUType; // CPU specifier + uint32_t CPUSubType; // Machine specifier + public: + // The various CPU_TYPE_* constants are already defined by at least one + // system header file and create compilation errors if not respected. +#if !defined(CPU_TYPE_I386) +#define CPU_TYPE_I386 7 +#endif +#if !defined(CPU_TYPE_X86_64) +#define CPU_TYPE_X86_64 (CPU_TYPE_I386 | 0x1000000) +#endif +#if !defined(CPU_TYPE_ARM) +#define CPU_TYPE_ARM 12 +#endif +#if !defined(CPU_TYPE_SPARC) +#define CPU_TYPE_SPARC 14 +#endif +#if !defined(CPU_TYPE_POWERPC) +#define CPU_TYPE_POWERPC 18 +#endif +#if !defined(CPU_TYPE_POWERPC64) +#define CPU_TYPE_POWERPC64 (CPU_TYPE_POWERPC | 0x1000000) +#endif + + // Constants for the cputype field + // see <mach/machine.h> + enum { + HDR_CPU_TYPE_I386 = CPU_TYPE_I386, + HDR_CPU_TYPE_X86_64 = CPU_TYPE_X86_64, + HDR_CPU_TYPE_ARM = CPU_TYPE_ARM, + HDR_CPU_TYPE_SPARC = CPU_TYPE_SPARC, + HDR_CPU_TYPE_POWERPC = CPU_TYPE_POWERPC, + HDR_CPU_TYPE_POWERPC64 = CPU_TYPE_POWERPC64 + }; + +#if !defined(CPU_SUBTYPE_I386_ALL) +#define CPU_SUBTYPE_I386_ALL 3 +#endif +#if !defined(CPU_SUBTYPE_X86_64_ALL) +#define CPU_SUBTYPE_X86_64_ALL 3 +#endif +#if !defined(CPU_SUBTYPE_ARM_ALL) +#define CPU_SUBTYPE_ARM_ALL 0 +#endif +#if !defined(CPU_SUBTYPE_SPARC_ALL) +#define CPU_SUBTYPE_SPARC_ALL 0 +#endif +#if !defined(CPU_SUBTYPE_POWERPC_ALL) +#define CPU_SUBTYPE_POWERPC_ALL 0 +#endif + + // Constants for the cpusubtype field + // see <mach/machine.h> + enum { + HDR_CPU_SUBTYPE_I386_ALL = CPU_SUBTYPE_I386_ALL, + HDR_CPU_SUBTYPE_X86_64_ALL = CPU_SUBTYPE_X86_64_ALL, + HDR_CPU_SUBTYPE_ARM_ALL = CPU_SUBTYPE_ARM_ALL, + HDR_CPU_SUBTYPE_SPARC_ALL = CPU_SUBTYPE_SPARC_ALL, + HDR_CPU_SUBTYPE_POWERPC_ALL = CPU_SUBTYPE_POWERPC_ALL + }; + + TargetMachOWriterInfo(uint32_t cputype, uint32_t cpusubtype) + : CPUType(cputype), CPUSubType(cpusubtype) {} + virtual ~TargetMachOWriterInfo(); + + virtual MachineRelocation GetJTRelocation(unsigned Offset, + MachineBasicBlock *MBB) const; + + virtual unsigned GetTargetRelocation(MachineRelocation &MR, + unsigned FromIdx, + unsigned ToAddr, + unsigned ToIdx, + OutputBuffer &RelocOut, + OutputBuffer &SecOut, + bool Scattered, + bool Extern) const { return 0; } + + uint32_t getCPUType() const { return CPUType; } + uint32_t getCPUSubType() const { return CPUSubType; } + }; + +} // end llvm namespace + +#endif // LLVM_TARGET_TARGETMACHOWRITERINFO_H diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h new file mode 100644 index 0000000..bdcc4ef --- /dev/null +++ b/include/llvm/Target/TargetMachine.h @@ -0,0 +1,432 @@ +//===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TargetMachine and LLVMTargetMachine classes. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETMACHINE_H +#define LLVM_TARGET_TARGETMACHINE_H + +#include "llvm/Target/TargetInstrItineraries.h" +#include <cassert> + +namespace llvm { + +class TargetAsmInfo; +class TargetData; +class TargetSubtarget; +class TargetInstrInfo; +class TargetIntrinsicInfo; +class TargetJITInfo; +class TargetLowering; +class TargetFrameInfo; +class MachineCodeEmitter; +class JITCodeEmitter; +class TargetRegisterInfo; +class Module; +class PassManagerBase; +class PassManager; +class Pass; +class TargetMachOWriterInfo; +class TargetELFWriterInfo; +class raw_ostream; + +// Relocation model types. +namespace Reloc { + enum Model { + Default, + Static, + PIC_, // Cannot be named PIC due to collision with -DPIC + DynamicNoPIC + }; +} + +// Code model types. +namespace CodeModel { + enum Model { + Default, + Small, + Kernel, + Medium, + Large + }; +} + +namespace FileModel { + enum Model { + Error, + None, + AsmFile, + MachOFile, + ElfFile + }; +} + +// Code generation optimization level. +namespace CodeGenOpt { + enum Level { + Default, + None, + Aggressive + }; +} + +//===----------------------------------------------------------------------===// +/// +/// TargetMachine - Primary interface to the complete machine description for +/// the target machine. All target-specific information should be accessible +/// through this interface. +/// +class TargetMachine { + TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT + void operator=(const TargetMachine &); // DO NOT IMPLEMENT +protected: // Can only create subclasses. + TargetMachine() : AsmInfo(0) { } + + /// getSubtargetImpl - virtual method implemented by subclasses that returns + /// a reference to that target's TargetSubtarget-derived member variable. + virtual const TargetSubtarget *getSubtargetImpl() const { return 0; } + + /// AsmInfo - Contains target specific asm information. + /// + mutable const TargetAsmInfo *AsmInfo; + + /// createTargetAsmInfo - Create a new instance of target specific asm + /// information. + virtual const TargetAsmInfo *createTargetAsmInfo() const { return 0; } + +public: + virtual ~TargetMachine(); + + /// getModuleMatchQuality - This static method should be implemented by + /// targets to indicate how closely they match the specified module. This is + /// used by the LLC tool to determine which target to use when an explicit + /// -march option is not specified. If a target returns zero, it will never + /// be chosen without an explicit -march option. + static unsigned getModuleMatchQuality(const Module &) { return 0; } + + /// getJITMatchQuality - This static method should be implemented by targets + /// that provide JIT capabilities to indicate how suitable they are for + /// execution on the current host. If a value of 0 is returned, the target + /// will not be used unless an explicit -march option is used. 
+ static unsigned getJITMatchQuality() { return 0; } + + // Interfaces to the major aspects of target machine information: + // -- Instruction opcode and operand information + // -- Pipelines and scheduling information + // -- Stack frame information + // -- Selection DAG lowering information + // + virtual const TargetInstrInfo *getInstrInfo() const { return 0; } + virtual const TargetFrameInfo *getFrameInfo() const { return 0; } + virtual TargetLowering *getTargetLowering() const { return 0; } + virtual const TargetData *getTargetData() const { return 0; } + + /// getTargetAsmInfo - Return target specific asm information. + /// + const TargetAsmInfo *getTargetAsmInfo() const { + if (!AsmInfo) AsmInfo = createTargetAsmInfo(); + return AsmInfo; + } + + /// getSubtarget - This method returns a pointer to the specified type of + /// TargetSubtarget. In debug builds, it verifies that the object being + /// returned is of the correct type. + template<typename STC> const STC &getSubtarget() const { + const TargetSubtarget *TST = getSubtargetImpl(); + assert(TST && dynamic_cast<const STC*>(TST) && + "Not the right kind of subtarget!"); + return *static_cast<const STC*>(TST); + } + + /// getRegisterInfo - If register information is available, return it. If + /// not, return null. This is kept separate from RegInfo until RegInfo has + /// details of graph coloring register allocation removed from it. + /// + virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; } + + /// getIntrinsicInfo - If intrinsic information is available, return it. If + /// not, return null. + /// + virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return 0; } + + /// getJITInfo - If this target supports a JIT, return information for it, + /// otherwise return null. + /// + virtual TargetJITInfo *getJITInfo() { return 0; } + + /// getInstrItineraryData - Returns instruction itinerary data for the target + /// or specific subtarget. + /// + virtual const InstrItineraryData getInstrItineraryData() const { + return InstrItineraryData(); + } + + /// getMachOWriterInfo - If this target supports a Mach-O writer, return + /// information for it, otherwise return null. + /// + virtual const TargetMachOWriterInfo *getMachOWriterInfo() const { return 0; } + + /// getELFWriterInfo - If this target supports an ELF writer, return + /// information for it, otherwise return null. + /// + virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; } + + /// getRelocationModel - Returns the code generation relocation model. The + /// choices are static, PIC, and dynamic-no-pic, and target default. + static Reloc::Model getRelocationModel(); + + /// setRelocationModel - Sets the code generation relocation model. + /// + static void setRelocationModel(Reloc::Model Model); + + /// getCodeModel - Returns the code model. The choices are small, kernel, + /// medium, large, and target default. + static CodeModel::Model getCodeModel(); + + /// setCodeModel - Sets the code model. + /// + static void setCodeModel(CodeModel::Model Model); + + /// getAsmVerbosityDefault - Returns the default value of asm verbosity. + /// + static bool getAsmVerbosityDefault(); + + /// setAsmVerbosityDefault - Set the default value of asm verbosity. Default + /// is false. + static void setAsmVerbosityDefault(bool); + + /// CodeGenFileType - These enums are meant to be passed into + /// addPassesToEmitFile to indicate what type of file to emit. 
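/*
  Typical client-side use of the query interface above, assuming a hypothetical
  FooSubtarget with a made-up hasFeatureX() accessor. The templated getSubtarget
  call performs a checked downcast in debug builds.

    void configureLowering(TargetMachine &TM) {
      const FooSubtarget &ST = TM.getSubtarget<FooSubtarget>();
      const TargetInstrInfo *TII = TM.getInstrInfo();
      (void)TII;
      if (ST.hasFeatureX() &&                                // hypothetical
          TargetMachine::getRelocationModel() == Reloc::PIC_) {
        // position-independent lowering decisions would go here
      }
    }
*/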
+ enum CodeGenFileType { + AssemblyFile, ObjectFile, DynamicLibrary + }; + + /// getEnableTailMergeDefault - the default setting for -enable-tail-merge + /// on this target. User flag overrides. + virtual bool getEnableTailMergeDefault() const { return true; } + + /// addPassesToEmitFile - Add passes to the specified pass manager to get the + /// specified file emitted. Typically this will involve several steps of code + /// generation. If Fast is set to true, the code generator should emit code + /// as fast as possible, though the generated code may be less efficient. + /// This method should return FileModel::Error if emission of this file type + /// is not supported. + /// + virtual FileModel::Model addPassesToEmitFile(PassManagerBase &, + raw_ostream &, + CodeGenFileType, + CodeGenOpt::Level) { + return FileModel::None; + } + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &, + MachineCodeEmitter *, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &, + JITCodeEmitter *, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &, + MachineCodeEmitter &, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &, + JITCodeEmitter &, + CodeGenOpt::Level) { + return true; + } + + /// addPassesToEmitWholeFile - This method can be implemented by targets that + /// require having the entire module at once. This is not recommended, do not + /// use this. + virtual bool WantsWholeFile() const { return false; } + virtual bool addPassesToEmitWholeFile(PassManager &, raw_ostream &, + CodeGenFileType, + CodeGenOpt::Level) { + return true; + } +}; + +/// LLVMTargetMachine - This class describes a target machine that is +/// implemented with the LLVM target-independent code generator. +/// +class LLVMTargetMachine : public TargetMachine { +protected: // Can only create subclasses. + LLVMTargetMachine() { } + + /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for + /// both emitting to assembly files or machine code output. + /// + bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level); + +public: + + /// addPassesToEmitFile - Add passes to the specified pass manager to get the + /// specified file emitted. Typically this will involve several steps of code + /// generation. 
If OptLevel is None, the code generator should emit code as fast + /// as possible, though the generated code may be less efficient. This method + /// should return FileModel::Error if emission of this file type is not + /// supported. + /// + /// The default implementation of this method adds components from the + /// LLVM retargetable code generator, invoking the methods below to get + /// target-specific passes in standard locations. + /// + virtual FileModel::Model addPassesToEmitFile(PassManagerBase &PM, + raw_ostream &Out, + CodeGenFileType FileType, + CodeGenOpt::Level); + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &PM, + MachineCodeEmitter *MCE, + CodeGenOpt::Level); + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &PM, + JITCodeEmitter *MCE, + CodeGenOpt::Level); + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &PM, + MachineCodeEmitter &MCE, + CodeGenOpt::Level); + + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to + /// get machine code emitted. This uses a MachineCodeEmitter object to handle + /// actually outputting the machine code and resolving things like the address + /// of functions. This method returns true if machine code emission is + /// not supported. + /// + virtual bool addPassesToEmitMachineCode(PassManagerBase &PM, + JITCodeEmitter &MCE, + CodeGenOpt::Level); + + /// Target-Independent Code Generator Pass Configuration Options. + + /// addInstSelector - This method should add any "last minute" LLVM->LLVM + /// passes, then install an instruction selector pass, which converts from + /// LLVM code to machine instructions. + virtual bool addInstSelector(PassManagerBase &, CodeGenOpt::Level) { + return true; + } + + /// addPreRegAllocPasses - This method may be implemented by targets that want + /// to run passes immediately before register allocation. This should return + /// true if -print-machineinstrs should print after these passes. + virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level) { + return false; + } + + /// addPostRegAllocPasses - This method may be implemented by targets that + /// want to run passes after register allocation but before prolog-epilog + /// insertion. This should return true if -print-machineinstrs should print + /// after these passes. + virtual bool addPostRegAlloc(PassManagerBase &, CodeGenOpt::Level) { + return false; + } + + /// addPreEmitPass - This pass may be implemented by targets that want to run + /// passes immediately before machine code is emitted. This should return + /// true if -print-machineinstrs should print out the code after the passes. 
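/*
  Sketch of how a backend built on LLVMTargetMachine typically fills in the
  pass-configuration hooks. FooTargetMachine, createFooISelDag and
  createFooBranchRelaxationPass are hypothetical factories for target-specific
  passes; only the override shape and return-value convention are the point.

    bool FooTargetMachine::addInstSelector(PassManagerBase &PM,
                                           CodeGenOpt::Level OptLevel) {
      PM.add(createFooISelDag(*this, OptLevel));  // LLVM IR -> Foo MachineInstrs
      return false;
    }

    bool FooTargetMachine::addPreEmitPass(PassManagerBase &PM,
                                          CodeGenOpt::Level OptLevel) {
      PM.add(createFooBranchRelaxationPass());    // runs just before emission
      return true;   // ask -print-machineinstrs to dump after this pass
    }
*/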
+ virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level) { + return false; + } + + + /// addAssemblyEmitter - This pass should be overridden by the target to add + /// the asmprinter, if asm emission is supported. If this is not supported, + /// 'true' should be returned. + virtual bool addAssemblyEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /* VerboseAsmDefault */, raw_ostream &) { + return true; + } + + /// addCodeEmitter - This pass should be overridden by the target to add a + /// code emitter, if supported. If this is not supported, 'true' should be + /// returned. If DumpAsm is true, the generated assembly is printed to cerr. + virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, MachineCodeEmitter &) { + return true; + } + + /// addCodeEmitter - This pass should be overridden by the target to add a + /// code emitter, if supported. If this is not supported, 'true' should be + /// returned. If DumpAsm is true, the generated assembly is printed to cerr. + virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, JITCodeEmitter &) { + return true; + } + + /// addSimpleCodeEmitter - This pass should be overridden by the target to add + /// a code emitter (without setting flags), if supported. If this is not + /// supported, 'true' should be returned. If DumpAsm is true, the generated + /// assembly is printed to cerr. + virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, MachineCodeEmitter &) { + return true; + } + + /// addSimpleCodeEmitter - This pass should be overridden by the target to add + /// a code emitter (without setting flags), if supported. If this is not + /// supported, 'true' should be returned. If DumpAsm is true, the generated + /// assembly is printed to cerr. + virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /*DumpAsm*/, JITCodeEmitter &) { + return true; + } + + /// getEnableTailMergeDefault - the default setting for -enable-tail-merge + /// on this target. User flag overrides. + virtual bool getEnableTailMergeDefault() const { return true; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetMachineRegistry.h b/include/llvm/Target/TargetMachineRegistry.h new file mode 100644 index 0000000..b7ea448 --- /dev/null +++ b/include/llvm/Target/TargetMachineRegistry.h @@ -0,0 +1,97 @@ +//===-- Target/TargetMachineRegistry.h - Target Registration ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes two classes: the TargetMachineRegistry class, which allows +// tools to inspect all of registered targets, and the RegisterTarget class, +// which TargetMachine implementations should use to register themselves with +// the system. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETMACHINEREGISTRY_H +#define LLVM_TARGET_TARGETMACHINEREGISTRY_H + +#include "llvm/Module.h" +#include "llvm/Support/Registry.h" + +namespace llvm { + class Module; + class TargetMachine; + + struct TargetMachineRegistryEntry { + const char *Name; + const char *ShortDesc; + TargetMachine *(*CtorFn)(const Module &, const std::string &); + unsigned (*ModuleMatchQualityFn)(const Module &M); + unsigned (*JITMatchQualityFn)(); + + public: + TargetMachineRegistryEntry(const char *N, const char *SD, + TargetMachine *(*CF)(const Module &, const std::string &), + unsigned (*MMF)(const Module &M), + unsigned (*JMF)()) + : Name(N), ShortDesc(SD), CtorFn(CF), ModuleMatchQualityFn(MMF), + JITMatchQualityFn(JMF) {} + }; + + template<> + class RegistryTraits<TargetMachine> { + public: + typedef TargetMachineRegistryEntry entry; + + static const char *nameof(const entry &Entry) { return Entry.Name; } + static const char *descof(const entry &Entry) { return Entry.ShortDesc; } + }; + + struct TargetMachineRegistry : public Registry<TargetMachine> { + /// getClosestStaticTargetForModule - Given an LLVM module, pick the best + /// target that is compatible with the module. If no close target can be + /// found, this returns null and sets the Error string to a reason. + static const entry *getClosestStaticTargetForModule(const Module &M, + std::string &Error); + + /// getClosestTargetForJIT - Pick the best target that is compatible with + /// the current host. If no close target can be found, this returns null + /// and sets the Error string to a reason. + static const entry *getClosestTargetForJIT(std::string &Error); + + }; + + //===--------------------------------------------------------------------===// + /// RegisterTarget - This class is used to make targets automatically register + /// themselves with the tool they are linked. Targets should define an + /// instance of this and implement the static methods described in the + /// TargetMachine comments. + /// The type 'TargetMachineImpl' should provide a constructor with two + /// parameters: + /// - const Module& M: the module that is being compiled: + /// - const std::string& FS: target-specific string describing target + /// flavour. + + template<class TargetMachineImpl> + struct RegisterTarget { + RegisterTarget(const char *Name, const char *ShortDesc) + : Entry(Name, ShortDesc, &Allocator, + &TargetMachineImpl::getModuleMatchQuality, + &TargetMachineImpl::getJITMatchQuality), + Node(Entry) + {} + + private: + TargetMachineRegistry::entry Entry; + TargetMachineRegistry::node Node; + + static TargetMachine *Allocator(const Module &M, const std::string &FS) { + return new TargetMachineImpl(M, FS); + } + }; + +} + +#endif diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h new file mode 100644 index 0000000..06d7d79 --- /dev/null +++ b/include/llvm/Target/TargetOptions.h @@ -0,0 +1,126 @@ +//===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines command line option flags that are shared across various +// targets. 
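/*
  The RegisterTarget template and TargetMachineRegistry above are typically used
  in two places: the target library defines one static RegisterTarget object, and
  drivers walk the registry to pick a target. FooTargetMachine, the "foo" name
  string and FeatureString are hypothetical; FooTargetMachine is assumed to
  provide the static match-quality methods documented in TargetMachine.

    // In the target's library (e.g. a hypothetical FooTargetMachine.cpp):
    static RegisterTarget<FooTargetMachine> X("foo", "Foo 32-bit processor");

    // In a driver, choosing and constructing a target for a module:
    TargetMachine *createTM(const Module &M, const std::string &FeatureString) {
      std::string Err;
      const TargetMachineRegistry::entry *E =
        TargetMachineRegistry::getClosestStaticTargetForModule(M, Err);
      if (!E)
        return 0;                     // Err explains why nothing matched
      return E->CtorFn(M, FeatureString);
    }
*/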
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETOPTIONS_H +#define LLVM_TARGET_TARGETOPTIONS_H + +namespace llvm { + /// PrintMachineCode - This flag is enabled when the -print-machineinstrs + /// option is specified on the command line, and should enable debugging + /// output from the code generator. + extern bool PrintMachineCode; + + /// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is + /// specified on the command line. If the target supports the frame pointer + /// elimination optimization, this option should disable it. + extern bool NoFramePointerElim; + + /// LessPreciseFPMAD - This flag is enabled when the + /// -enable-fp-mad is specified on the command line. When this flag is off + /// (the default), the code generator is not allowed to generate mad + /// (multiply add) if the result is "less precise" than doing those operations + /// individually. + extern bool LessPreciseFPMADOption; + extern bool LessPreciseFPMAD(); + + /// NoExcessFPPrecision - This flag is enabled when the + /// -disable-excess-fp-precision flag is specified on the command line. When + /// this flag is off (the default), the code generator is allowed to produce + /// results that are "more precise" than IEEE allows. This includes use of + /// FMA-like operations and use of the X86 FP registers without rounding all + /// over the place. + extern bool NoExcessFPPrecision; + + /// UnsafeFPMath - This flag is enabled when the + /// -enable-unsafe-fp-math flag is specified on the command line. When + /// this flag is off (the default), the code generator is not allowed to + /// produce results that are "less precise" than IEEE allows. This includes + /// use of X86 instructions like FSIN and FCOS instead of libcalls. + /// UnsafeFPMath implies FiniteOnlyFPMath and LessPreciseFPMAD. + extern bool UnsafeFPMath; + + /// FiniteOnlyFPMath - This returns true when the -enable-finite-only-fp-math + /// option is specified on the command line. If this returns false (default), + /// the code generator is not allowed to assume that FP arithmetic arguments + /// and results are never NaNs or +-Infs. + extern bool FiniteOnlyFPMathOption; + extern bool FiniteOnlyFPMath(); + + /// HonorSignDependentRoundingFPMath - This returns true when the + /// -enable-sign-dependent-rounding-fp-math is specified. If this returns + /// false (the default), the code generator is allowed to assume that the + /// rounding behavior is the default (round-to-zero for all floating point to + /// integer conversions, and round-to-nearest for all other arithmetic + /// truncations). If this is enabled (set to true), the code generator must + /// assume that the rounding mode may dynamically change. + extern bool HonorSignDependentRoundingFPMathOption; + extern bool HonorSignDependentRoundingFPMath(); + + /// UseSoftFloat - This flag is enabled when the -soft-float flag is specified + /// on the command line. When this flag is on, the code generator will + /// generate libcalls to the software floating point library instead of + /// target FP instructions. + extern bool UseSoftFloat; + + /// NoImplicitFloat - This flag is enabled when the -no-implicit-float flag is + /// specified on the command line. When this flag is on, the code generator + /// won't generate any implicit floating point instructions. I.e., no XMM or + /// x87 or vectorized memcpy/memmove instructions. This is for X86 only. 
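/*
  These globals are set by command-line parsing in the tools and simply read by
  the code generator. Two hypothetical query helpers, only to show the pattern:

    static bool allowFPMAD() {
      // As documented above, UnsafeFPMath implies LessPreciseFPMAD.
      return UnsafeFPMath || LessPreciseFPMAD();
    }

    static bool mustPreserveFramePointer() {
      return NoFramePointerElim;      // set by -disable-fp-elim
    }
*/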
+ extern bool NoImplicitFloat; + + /// NoZerosInBSS - By default some codegens place zero-initialized data to + /// .bss section. This flag disables such behaviour (necessary, e.g. for + /// crt*.o compiling). + extern bool NoZerosInBSS; + + /// ExceptionHandling - This flag indicates that exception information should + /// be emitted. + extern bool ExceptionHandling; + + /// UnwindTablesMandatory - This flag indicates that unwind tables should + /// be emitted for all functions. + extern bool UnwindTablesMandatory; + + /// PerformTailCallOpt - This flag is enabled when -tailcallopt is specified + /// on the commandline. When the flag is on, the target will perform tail call + /// optimization (pop the caller's stack) providing it supports it. + extern bool PerformTailCallOpt; + + /// StackAlignment - Override default stack alignment for target. + extern unsigned StackAlignment; + + /// RealignStack - This flag indicates, whether stack should be automatically + /// realigned, if needed. + extern bool RealignStack; + + /// DisableJumpTables - This flag indicates jump tables should not be + /// generated. + extern bool DisableJumpTables; + + /// EnableFastISel - This flag enables fast-path instruction selection + /// which trades away generated code quality in favor of reducing + /// compile time. + extern bool EnableFastISel; + + /// StrongPHIElim - This flag enables more aggressive PHI elimination + /// wth earlier copy coalescing. + extern bool StrongPHIElim; + + /// DisableRedZone - This flag disables use of the "Red Zone" on + /// targets which would otherwise have one. + extern bool DisableRedZone; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h new file mode 100644 index 0000000..0218bfd --- /dev/null +++ b/include/llvm/Target/TargetRegisterInfo.h @@ -0,0 +1,656 @@ +//=== Target/TargetRegisterInfo.h - Target Register Information -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes an abstract interface used to get information about a +// target machines register file. This information is used for a variety of +// purposed, especially register allocation. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETREGISTERINFO_H +#define LLVM_TARGET_TARGETREGISTERINFO_H + +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/ADT/DenseSet.h" +#include <cassert> +#include <functional> + +namespace llvm { + +class BitVector; +class MachineFunction; +class MachineMove; +class RegScavenger; + +/// TargetRegisterDesc - This record contains all of the information known about +/// a particular register. The AliasSet field (if not null) contains a pointer +/// to a Zero terminated array of registers that this register aliases. This is +/// needed for architectures like X86 which have AL alias AX alias EAX. +/// Registers that this does not apply to simply should set this to null. +/// The SubRegs field is a zero terminated array of registers that are +/// sub-registers of the specific register, e.g. AL, AH are sub-registers of AX. +/// The SuperRegs field is a zero terminated array of registers that are +/// super-registers of the specific register, e.g. 
RAX, EAX, are super-registers +/// of AX. +/// +struct TargetRegisterDesc { + const char *AsmName; // Assembly language name for the register + const char *Name; // Printable name for the reg (for debugging) + const unsigned *AliasSet; // Register Alias Set, described above + const unsigned *SubRegs; // Sub-register set, described above + const unsigned *SuperRegs; // Super-register set, described above +}; + +class TargetRegisterClass { +public: + typedef const unsigned* iterator; + typedef const unsigned* const_iterator; + + typedef const MVT* vt_iterator; + typedef const TargetRegisterClass* const * sc_iterator; +private: + unsigned ID; + const char *Name; + const vt_iterator VTs; + const sc_iterator SubClasses; + const sc_iterator SuperClasses; + const sc_iterator SubRegClasses; + const sc_iterator SuperRegClasses; + const unsigned RegSize, Alignment; // Size & Alignment of register in bytes + const int CopyCost; + const iterator RegsBegin, RegsEnd; + DenseSet<unsigned> RegSet; +public: + TargetRegisterClass(unsigned id, + const char *name, + const MVT *vts, + const TargetRegisterClass * const *subcs, + const TargetRegisterClass * const *supcs, + const TargetRegisterClass * const *subregcs, + const TargetRegisterClass * const *superregcs, + unsigned RS, unsigned Al, int CC, + iterator RB, iterator RE) + : ID(id), Name(name), VTs(vts), SubClasses(subcs), SuperClasses(supcs), + SubRegClasses(subregcs), SuperRegClasses(superregcs), + RegSize(RS), Alignment(Al), CopyCost(CC), RegsBegin(RB), RegsEnd(RE) { + for (iterator I = RegsBegin, E = RegsEnd; I != E; ++I) + RegSet.insert(*I); + } + virtual ~TargetRegisterClass() {} // Allow subclasses + + /// getID() - Return the register class ID number. + /// + unsigned getID() const { return ID; } + + /// getName() - Return the register class name for debugging. + /// + const char *getName() const { return Name; } + + /// begin/end - Return all of the registers in this class. + /// + iterator begin() const { return RegsBegin; } + iterator end() const { return RegsEnd; } + + /// getNumRegs - Return the number of registers in this class. + /// + unsigned getNumRegs() const { return (unsigned)(RegsEnd-RegsBegin); } + + /// getRegister - Return the specified register in the class. + /// + unsigned getRegister(unsigned i) const { + assert(i < getNumRegs() && "Register number out of range!"); + return RegsBegin[i]; + } + + /// contains - Return true if the specified register is included in this + /// register class. + bool contains(unsigned Reg) const { + return RegSet.count(Reg); + } + + /// hasType - return true if this TargetRegisterClass has the ValueType vt. + /// + bool hasType(MVT vt) const { + for(int i = 0; VTs[i] != MVT::Other; ++i) + if (VTs[i] == vt) + return true; + return false; + } + + /// vt_begin / vt_end - Loop over all of the value types that can be + /// represented by values in this register class. + vt_iterator vt_begin() const { + return VTs; + } + + vt_iterator vt_end() const { + vt_iterator I = VTs; + while (*I != MVT::Other) ++I; + return I; + } + + /// subregclasses_begin / subregclasses_end - Loop over all of + /// the subreg register classes of this register class. + sc_iterator subregclasses_begin() const { + return SubRegClasses; + } + + sc_iterator subregclasses_end() const { + sc_iterator I = SubRegClasses; + while (*I != NULL) ++I; + return I; + } + + /// getSubRegisterRegClass - Return the register class of subregisters with + /// index SubIdx, or NULL if no such class exists. 
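/*
  Example of querying a TargetRegisterClass from allocator- or isel-side code.
  Nothing here is specific to one backend; RC is any class pointer obtained from
  the target, and countI32Candidates is an invented name for the sketch.

    static unsigned countI32Candidates(const TargetRegisterClass *RC,
                                       const MachineFunction &MF) {
      if (!RC->hasType(MVT::i32))
        return 0;
      unsigned N = 0;
      for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
             E = RC->allocation_order_end(MF); I != E; ++I)
        ++N;   // *I is a physical register number; RC->contains(*I) holds
      return N;
    }
*/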
+ const TargetRegisterClass* getSubRegisterRegClass(unsigned SubIdx) const { + assert(SubIdx>0 && "Invalid subregister index"); + for (unsigned s = 0; s != SubIdx-1; ++s) + if (!SubRegClasses[s]) + return NULL; + return SubRegClasses[SubIdx-1]; + } + + /// superregclasses_begin / superregclasses_end - Loop over all of + /// the superreg register classes of this register class. + sc_iterator superregclasses_begin() const { + return SuperRegClasses; + } + + sc_iterator superregclasses_end() const { + sc_iterator I = SuperRegClasses; + while (*I != NULL) ++I; + return I; + } + + /// hasSubClass - return true if the the specified TargetRegisterClass + /// is a proper subset of this TargetRegisterClass. + bool hasSubClass(const TargetRegisterClass *cs) const { + for (int i = 0; SubClasses[i] != NULL; ++i) + if (SubClasses[i] == cs) + return true; + return false; + } + + /// subclasses_begin / subclasses_end - Loop over all of the classes + /// that are proper subsets of this register class. + sc_iterator subclasses_begin() const { + return SubClasses; + } + + sc_iterator subclasses_end() const { + sc_iterator I = SubClasses; + while (*I != NULL) ++I; + return I; + } + + /// hasSuperClass - return true if the specified TargetRegisterClass is a + /// proper superset of this TargetRegisterClass. + bool hasSuperClass(const TargetRegisterClass *cs) const { + for (int i = 0; SuperClasses[i] != NULL; ++i) + if (SuperClasses[i] == cs) + return true; + return false; + } + + /// superclasses_begin / superclasses_end - Loop over all of the classes + /// that are proper supersets of this register class. + sc_iterator superclasses_begin() const { + return SuperClasses; + } + + sc_iterator superclasses_end() const { + sc_iterator I = SuperClasses; + while (*I != NULL) ++I; + return I; + } + + /// isASubClass - return true if this TargetRegisterClass is a subset + /// class of at least one other TargetRegisterClass. + bool isASubClass() const { + return SuperClasses[0] != 0; + } + + /// allocation_order_begin/end - These methods define a range of registers + /// which specify the registers in this class that are valid to register + /// allocate, and the preferred order to allocate them in. For example, + /// callee saved registers should be at the end of the list, because it is + /// cheaper to allocate caller saved registers. + /// + /// These methods take a MachineFunction argument, which can be used to tune + /// the allocatable registers based on the characteristics of the function. + /// One simple example is that the frame pointer register can be used if + /// frame-pointer-elimination is performed. + /// + /// By default, these methods return all registers in the class. + /// + virtual iterator allocation_order_begin(const MachineFunction &MF) const { + return begin(); + } + virtual iterator allocation_order_end(const MachineFunction &MF) const { + return end(); + } + + /// getSize - Return the size of the register in bytes, which is also the size + /// of a stack slot allocated to hold a spilled copy of this register. + unsigned getSize() const { return RegSize; } + + /// getAlignment - Return the minimum required alignment for a register of + /// this class. + unsigned getAlignment() const { return Alignment; } + + /// getCopyCost - Return the cost of copying a value between two registers in + /// this class. A negative number means the register class is very expensive + /// to copy e.g. status flag register classes. 
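+  ///
+  /// Illustrative only (editorial sketch; isCheapToCopy is a hypothetical
+  /// helper, not part of this header): a coalescer or spiller might consult
+  /// the copy cost roughly like this:
+  ///
+  ///   bool isCheapToCopy(const TargetRegisterClass *RC) {
+  ///     // Negative cost marks classes that are very expensive to copy.
+  ///     return RC->getCopyCost() >= 0 && RC->getCopyCost() <= 1;
+  ///   }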
+ int getCopyCost() const { return CopyCost; } +}; + + +/// TargetRegisterInfo base class - We assume that the target defines a static +/// array of TargetRegisterDesc objects that represent all of the machine +/// registers that the target has. As such, we simply have to track a pointer +/// to this array so that we can turn register number into a register +/// descriptor. +/// +class TargetRegisterInfo { +protected: + const unsigned* SubregHash; + const unsigned SubregHashSize; + const unsigned* SuperregHash; + const unsigned SuperregHashSize; + const unsigned* AliasesHash; + const unsigned AliasesHashSize; +public: + typedef const TargetRegisterClass * const * regclass_iterator; +private: + const TargetRegisterDesc *Desc; // Pointer to the descriptor array + unsigned NumRegs; // Number of entries in the array + + regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses + + int CallFrameSetupOpcode, CallFrameDestroyOpcode; +protected: + TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR, + regclass_iterator RegClassBegin, + regclass_iterator RegClassEnd, + int CallFrameSetupOpcode = -1, + int CallFrameDestroyOpcode = -1, + const unsigned* subregs = 0, + const unsigned subregsize = 0, + const unsigned* superregs = 0, + const unsigned superregsize = 0, + const unsigned* aliases = 0, + const unsigned aliasessize = 0); + virtual ~TargetRegisterInfo(); +public: + + enum { // Define some target independent constants + /// NoRegister - This physical register is not a real target register. It + /// is useful as a sentinal. + NoRegister = 0, + + /// FirstVirtualRegister - This is the first register number that is + /// considered to be a 'virtual' register, which is part of the SSA + /// namespace. This must be the same for all targets, which means that each + /// target is limited to 1024 registers. + FirstVirtualRegister = 1024 + }; + + /// isPhysicalRegister - Return true if the specified register number is in + /// the physical register namespace. + static bool isPhysicalRegister(unsigned Reg) { + assert(Reg && "this is not a register!"); + return Reg < FirstVirtualRegister; + } + + /// isVirtualRegister - Return true if the specified register number is in + /// the virtual register namespace. + static bool isVirtualRegister(unsigned Reg) { + assert(Reg && "this is not a register!"); + return Reg >= FirstVirtualRegister; + } + + /// getPhysicalRegisterRegClass - Returns the Register Class of a physical + /// register of the given type. If type is MVT::Other, then just return any + /// register class the register belongs to. + virtual const TargetRegisterClass * + getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const; + + /// getAllocatableSet - Returns a bitset indexed by register number + /// indicating if a register is allocatable or not. If a register class is + /// specified, returns the subset for the class. + BitVector getAllocatableSet(MachineFunction &MF, + const TargetRegisterClass *RC = NULL) const; + + const TargetRegisterDesc &operator[](unsigned RegNo) const { + assert(RegNo < NumRegs && + "Attempting to access record for invalid register number!"); + return Desc[RegNo]; + } + + /// Provide a get method, equivalent to [], but more useful if we have a + /// pointer to this object. + /// + const TargetRegisterDesc &get(unsigned RegNo) const { + return operator[](RegNo); + } + + /// getAliasSet - Return the set of registers aliased by the specified + /// register, or a null list of there are none. The list returned is zero + /// terminated. 
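+  /// For illustration only (editorial sketch; markAliases is a hypothetical
+  /// helper, not part of this header), the zero-terminated list can be walked
+  /// like this:
+  ///
+  ///   void markAliases(const TargetRegisterInfo &TRI, unsigned PhysReg,
+  ///                    BitVector &Clobbered) {
+  ///     Clobbered.set(PhysReg);
+  ///     if (const unsigned *AS = TRI.getAliasSet(PhysReg))
+  ///       for (; *AS; ++AS)        // list is zero terminated
+  ///         Clobbered.set(*AS);
+  ///   }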
+ /// + const unsigned *getAliasSet(unsigned RegNo) const { + return get(RegNo).AliasSet; + } + + /// getSubRegisters - Return the list of registers that are sub-registers of + /// the specified register, or a null list of there are none. The list + /// returned is zero terminated and sorted according to super-sub register + /// relations. e.g. X86::RAX's sub-register list is EAX, AX, AL, AH. + /// + const unsigned *getSubRegisters(unsigned RegNo) const { + return get(RegNo).SubRegs; + } + + /// getSuperRegisters - Return the list of registers that are super-registers + /// of the specified register, or a null list of there are none. The list + /// returned is zero terminated and sorted according to super-sub register + /// relations. e.g. X86::AL's super-register list is RAX, EAX, AX. + /// + const unsigned *getSuperRegisters(unsigned RegNo) const { + return get(RegNo).SuperRegs; + } + + /// getAsmName - Return the symbolic target-specific name for the + /// specified physical register. + const char *getAsmName(unsigned RegNo) const { + return get(RegNo).AsmName; + } + + /// getName - Return the human-readable symbolic target-specific name for the + /// specified physical register. + const char *getName(unsigned RegNo) const { + return get(RegNo).Name; + } + + /// getNumRegs - Return the number of registers this target has (useful for + /// sizing arrays holding per register information) + unsigned getNumRegs() const { + return NumRegs; + } + + /// areAliases - Returns true if the two registers alias each other, false + /// otherwise + bool areAliases(unsigned regA, unsigned regB) const { + size_t index = (regA + regB * 37) & (AliasesHashSize-1); + unsigned ProbeAmt = 0; + while (AliasesHash[index*2] != 0 && + AliasesHash[index*2+1] != 0) { + if (AliasesHash[index*2] == regA && AliasesHash[index*2+1] == regB) + return true; + + index = (index + ProbeAmt) & (AliasesHashSize-1); + ProbeAmt += 2; + } + + return false; + } + + /// regsOverlap - Returns true if the two registers are equal or alias each + /// other. The registers may be virtual register. + bool regsOverlap(unsigned regA, unsigned regB) const { + if (regA == regB) + return true; + + if (isVirtualRegister(regA) || isVirtualRegister(regB)) + return false; + return areAliases(regA, regB); + } + + /// isSubRegister - Returns true if regB is a sub-register of regA. + /// + bool isSubRegister(unsigned regA, unsigned regB) const { + // SubregHash is a simple quadratically probed hash table. + size_t index = (regA + regB * 37) & (SubregHashSize-1); + unsigned ProbeAmt = 2; + while (SubregHash[index*2] != 0 && + SubregHash[index*2+1] != 0) { + if (SubregHash[index*2] == regA && SubregHash[index*2+1] == regB) + return true; + + index = (index + ProbeAmt) & (SubregHashSize-1); + ProbeAmt += 2; + } + + return false; + } + + /// isSuperRegister - Returns true if regB is a super-register of regA. + /// + bool isSuperRegister(unsigned regA, unsigned regB) const { + // SuperregHash is a simple quadratically probed hash table. + size_t index = (regA + regB * 37) & (SuperregHashSize-1); + unsigned ProbeAmt = 2; + while (SuperregHash[index*2] != 0 && + SuperregHash[index*2+1] != 0) { + if (SuperregHash[index*2] == regA && SuperregHash[index*2+1] == regB) + return true; + + index = (index + ProbeAmt) & (SuperregHashSize-1); + ProbeAmt += 2; + } + + return false; + } + + /// getCalleeSavedRegs - Return a null-terminated list of all of the + /// callee saved registers on this target. 
The register should be in the + /// order of desired callee-save stack frame offset. The first register is + /// closed to the incoming stack pointer if stack grows down, and vice versa. + virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0) + const = 0; + + /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred + /// register classes to spill each callee saved register with. The order and + /// length of this list match the getCalleeSaveRegs() list. + virtual const TargetRegisterClass* const *getCalleeSavedRegClasses( + const MachineFunction *MF) const =0; + + /// getReservedRegs - Returns a bitset indexed by physical register number + /// indicating if a register is a special register that has particular uses + /// and should be considered unavailable at all times, e.g. SP, RA. This is + /// used by register scavenger to determine what registers are free. + virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0; + + /// getSubReg - Returns the physical register number of sub-register "Index" + /// for physical register RegNo. Return zero if the sub-register does not + /// exist. + virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0; + + /// getMatchingSuperReg - Return a super-register of the specified register + /// Reg so its sub-register of index SubIdx is Reg. + unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, + const TargetRegisterClass *RC) const { + for (const unsigned *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs) + if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR)) + return SR; + return 0; + } + + //===--------------------------------------------------------------------===// + // Register Class Information + // + + /// Register class iterators + /// + regclass_iterator regclass_begin() const { return RegClassBegin; } + regclass_iterator regclass_end() const { return RegClassEnd; } + + unsigned getNumRegClasses() const { + return (unsigned)(regclass_end()-regclass_begin()); + } + + /// getRegClass - Returns the register class associated with the enumeration + /// value. See class TargetOperandInfo. + const TargetRegisterClass *getRegClass(unsigned i) const { + assert(i <= getNumRegClasses() && "Register Class ID out of range"); + return i ? RegClassBegin[i - 1] : NULL; + } + + /// getPointerRegClass - Returns a TargetRegisterClass used for pointer + /// values. + virtual const TargetRegisterClass *getPointerRegClass() const { + assert(0 && "Target didn't implement getPointerRegClass!"); + return 0; // Must return a value in order to compile with VS 2005 + } + + /// getCrossCopyRegClass - Returns a legal register class to copy a register + /// in the specified class to or from. Returns NULL if it is possible to copy + /// between a two registers of the specified class. + virtual const TargetRegisterClass * + getCrossCopyRegClass(const TargetRegisterClass *RC) const { + return NULL; + } + + /// targetHandlesStackFrameRounding - Returns true if the target is + /// responsible for rounding up the stack frame (probably at emitPrologue + /// time). + virtual bool targetHandlesStackFrameRounding() const { + return false; + } + + /// requiresRegisterScavenging - returns true if the target requires (and can + /// make use of) the register scavenger. + virtual bool requiresRegisterScavenging(const MachineFunction &MF) const { + return false; + } + + /// hasFP - Return true if the specified function should have a dedicated + /// frame pointer register. 
For most targets this is true only if the function + /// has variable sized allocas or if frame pointer elimination is disabled. + virtual bool hasFP(const MachineFunction &MF) const = 0; + + // hasReservedCallFrame - Under normal circumstances, when a frame pointer is + // not required, we reserve argument space for call sites in the function + // immediately on entry to the current function. This eliminates the need for + // add/sub sp brackets around call sites. Returns true if the call frame is + // included as part of the stack frame. + virtual bool hasReservedCallFrame(MachineFunction &MF) const { + return !hasFP(MF); + } + + // needsStackRealignment - true if storage within the function requires the + // stack pointer to be aligned more than the normal calling convention calls + // for. + virtual bool needsStackRealignment(const MachineFunction &MF) const { + return false; + } + + /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the + /// frame setup/destroy instructions if they exist (-1 otherwise). Some + /// targets use pseudo instructions in order to abstract away the difference + /// between operating with a frame pointer and operating without, through the + /// use of these two instructions. + /// + int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } + int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } + + /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog + /// code insertion to eliminate call frame setup and destroy pseudo + /// instructions (but only if the Target is using them). It is responsible + /// for eliminating these instructions, replacing them with concrete + /// instructions. This method need only be implemented if using call frame + /// setup/destroy pseudo instructions. + /// + virtual void + eliminateCallFramePseudoInstr(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const { + assert(getCallFrameSetupOpcode()== -1 && getCallFrameDestroyOpcode()== -1 && + "eliminateCallFramePseudoInstr must be implemented if using" + " call frame setup/destroy pseudo instructions!"); + assert(0 && "Call Frame Pseudo Instructions do not exist on this target!"); + } + + /// processFunctionBeforeCalleeSavedScan - This method is called immediately + /// before PrologEpilogInserter scans the physical registers used to determine + /// what callee saved registers should be spilled. This method is optional. + virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS = NULL) const { + + } + + /// processFunctionBeforeFrameFinalized - This method is called immediately + /// before the specified functions frame layout (MF.getFrameInfo()) is + /// finalized. Once the frame is finalized, MO_FrameIndex operands are + /// replaced with direct constants. This method is optional. + /// + virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const { + } + + /// eliminateFrameIndex - This method must be overriden to eliminate abstract + /// frame indices from instructions which may use them. The instruction + /// referenced by the iterator contains an MO_FrameIndex operand which must be + /// eliminated by this method. This method may modify or replace the + /// specified instruction, as long as it keeps the iterator pointing the the + /// finished product. SPAdj is the SP adjustment due to call frame setup + /// instruction. 
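+  ///
+  /// A hedged sketch of a typical target override (MyRegisterInfo and the
+  /// reg+imm operand layout are illustrative assumptions, not code from any
+  /// in-tree backend):
+  ///
+  ///   void MyRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
+  ///                                            int SPAdj,
+  ///                                            RegScavenger *RS) const {
+  ///     unsigned i = 0;
+  ///     while (!MI->getOperand(i).isFI()) ++i;   // locate the MO_FrameIndex
+  ///     MachineFunction &MF = *MI->getParent()->getParent();
+  ///     int Offset = getFrameIndexOffset(MF, MI->getOperand(i).getIndex());
+  ///     // Rewrite the operand as FrameReg + immediate offset.
+  ///     MI->getOperand(i).ChangeToRegister(getFrameRegister(MF), false);
+  ///     MI->getOperand(i+1).ChangeToImmediate(Offset + SPAdj);
+  ///   }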
+  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+                                   int SPAdj, RegScavenger *RS=NULL) const = 0;
+
+  /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+  /// the function.
+  virtual void emitPrologue(MachineFunction &MF) const = 0;
+  virtual void emitEpilogue(MachineFunction &MF,
+                            MachineBasicBlock &MBB) const = 0;
+
+  //===--------------------------------------------------------------------===//
+  /// Debug information queries.
+
+  /// getDwarfRegNum - Map a target register to an equivalent dwarf register
+  /// number. Returns -1 if there is no equivalent value. The second
+  /// parameter allows targets to use different numberings for EH info and
+  /// debugging info.
+  virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const = 0;
+
+  /// getFrameRegister - This method should return the register used as a base
+  /// for values allocated in the current stack frame.
+  virtual unsigned getFrameRegister(MachineFunction &MF) const = 0;
+
+  /// getFrameIndexOffset - Returns the displacement from the frame register to
+  /// the stack frame of the specified index.
+  virtual int getFrameIndexOffset(MachineFunction &MF, int FI) const;
+
+  /// getRARegister - This method should return the register where the return
+  /// address can be found.
+  virtual unsigned getRARegister() const = 0;
+
+  /// getInitialFrameState - Returns a list of machine moves that are assumed
+  /// on entry to all functions. Note that LabelID is ignored (assumed to be
+  /// the beginning of the function.)
+  virtual void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+};
+
+
+// This is useful when building IndexedMaps keyed on virtual registers
+struct VirtReg2IndexFunctor : std::unary_function<unsigned, unsigned> {
+  unsigned operator()(unsigned Reg) const {
+    return Reg - TargetRegisterInfo::FirstVirtualRegister;
+  }
+};
+
+/// getCommonSubClass - Find the largest common subclass of A and B. Return
+/// NULL if there is no common subclass.
+const TargetRegisterClass *getCommonSubClass(const TargetRegisterClass *A,
+                                             const TargetRegisterClass *B);
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td
new file mode 100644
index 0000000..38461c5
--- /dev/null
+++ b/include/llvm/Target/TargetSchedule.td
@@ -0,0 +1,72 @@
+//===- TargetSchedule.td - Target Independent Scheduling ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent scheduling interfaces which should
+// be implemented by each target which is using TableGen based scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Processor functional unit - These values represent the functional units
+// available across all chip sets for the target. E.g., IntUnit, FPUnit, ...
+// These may be independent values for each chip set or may be shared across
+// all chip sets of the target. Each functional unit is treated as a resource
+// during scheduling and affects instruction ordering based on availability
+// during a time interval.
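+//
+// For illustration only (an editorial sketch of a hypothetical in-order chip,
+// not definitions from this file): functional units and an itinerary built
+// from them, using the classes declared below, might look like
+//
+//   def ALU   : FuncUnit;
+//   def LdSt  : FuncUnit;
+//   def IIAlu : InstrItinClass;
+//   def MyChipItineraries : ProcessorItineraries<[
+//     InstrItinData<IIAlu, [InstrStage<1, [ALU]>]>   // one cycle on the ALU
+//   ]>;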
+// +class FuncUnit; + +//===----------------------------------------------------------------------===// +// Instruction stage - These values represent a step in the execution of an +// instruction. The latency represents the number of discrete time slots used +// need to complete the stage. Units represent the choice of functional units +// that can be used to complete the stage. Eg. IntUnit1, IntUnit2. +// +class InstrStage<int cycles, list<FuncUnit> units> { + int Cycles = cycles; // length of stage in machine cycles + list<FuncUnit> Units = units; // choice of functional units +} + +//===----------------------------------------------------------------------===// +// Instruction itinerary - An itinerary represents a sequential series of steps +// required to complete an instruction. Itineraries are represented as lists of +// instruction stages. +// + +//===----------------------------------------------------------------------===// +// Instruction itinerary classes - These values represent 'named' instruction +// itinerary. Using named itineraries simplifies managing groups of +// instructions across chip sets. An instruction uses the same itinerary class +// across all chip sets. Thus a new chip set can be added without modifying +// instruction information. +// +class InstrItinClass; +def NoItinerary : InstrItinClass; + +//===----------------------------------------------------------------------===// +// Instruction itinerary data - These values provide a runtime map of an +// instruction itinerary class (name) to it's itinerary data. +// +class InstrItinData<InstrItinClass Class, list<InstrStage> stages> { + InstrItinClass TheClass = Class; + list<InstrStage> Stages = stages; +} + +//===----------------------------------------------------------------------===// +// Processor itineraries - These values represent the set of all itinerary +// classes for a given chip set. +// +class ProcessorItineraries<list<InstrItinData> iid> { + list<InstrItinData> IID = iid; +} + +// NoItineraries - A marker that can be used by processors without schedule +// info. +def NoItineraries : ProcessorItineraries<[]>; + diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td new file mode 100644 index 0000000..2cd2967 --- /dev/null +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -0,0 +1,864 @@ +//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the target-independent interfaces used by SelectionDAG +// instruction selection generators. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Selection DAG Type Constraint definitions. +// +// Note that the semantics of these constraints are hard coded into tblgen. To +// modify or add constraints, you have to hack tblgen. +// + +class SDTypeConstraint<int opnum> { + int OperandNum = opnum; +} + +// SDTCisVT - The specified operand has exactly this VT. +class SDTCisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> { + ValueType VT = vt; +} + +class SDTCisPtrTy<int OpNum> : SDTypeConstraint<OpNum>; + +// SDTCisInt - The specified operand is has integer type. 
+class SDTCisInt<int OpNum> : SDTypeConstraint<OpNum>; + +// SDTCisFP - The specified operand is has floating point type. +class SDTCisFP<int OpNum> : SDTypeConstraint<OpNum>; + +// SDTCisSameAs - The two specified operands have identical types. +class SDTCisSameAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> { + int OtherOperandNum = OtherOp; +} + +// SDTCisVTSmallerThanOp - The specified operand is a VT SDNode, and its type is +// smaller than the 'Other' operand. +class SDTCisVTSmallerThanOp<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> { + int OtherOperandNum = OtherOp; +} + +class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{ + int BigOperandNum = BigOp; +} + +/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same +/// type as the element type of OtherOp, which is a vector type. +class SDTCisEltOfVec<int ThisOp, int OtherOp> + : SDTypeConstraint<ThisOp> { + int OtherOpNum = OtherOp; +} + +//===----------------------------------------------------------------------===// +// Selection DAG Type Profile definitions. +// +// These use the constraints defined above to describe the type requirements of +// the various nodes. These are not hard coded into tblgen, allowing targets to +// add their own if needed. +// + +// SDTypeProfile - This profile describes the type requirements of a Selection +// DAG node. +class SDTypeProfile<int numresults, int numoperands, + list<SDTypeConstraint> constraints> { + int NumResults = numresults; + int NumOperands = numoperands; + list<SDTypeConstraint> Constraints = constraints; +} + +// Builtin profiles. +def SDTIntLeaf: SDTypeProfile<1, 0, [SDTCisInt<0>]>; // for 'imm'. +def SDTFPLeaf : SDTypeProfile<1, 0, [SDTCisFP<0>]>; // for 'fpimm'. +def SDTPtrLeaf: SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>; // for '&g'. +def SDTOther : SDTypeProfile<1, 0, [SDTCisVT<0, OtherVT>]>; // for 'vt'. +def SDTUNDEF : SDTypeProfile<1, 0, []>; // for 'undef'. +def SDTUnaryOp : SDTypeProfile<1, 1, []>; // for bitconvert. + +def SDTIntBinOp : SDTypeProfile<1, 2, [ // add, and, or, xor, udiv, etc. + SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0> +]>; +def SDTIntShiftOp : SDTypeProfile<1, 2, [ // shl, sra, srl + SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2> +]>; +def SDTFPBinOp : SDTypeProfile<1, 2, [ // fadd, fmul, etc. + SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0> +]>; +def SDTFPSignOp : SDTypeProfile<1, 2, [ // fcopysign. + SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisFP<2> +]>; +def SDTFPTernaryOp : SDTypeProfile<1, 3, [ // fmadd, fnmsub, etc. 
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisFP<0> +]>; +def SDTIntUnaryOp : SDTypeProfile<1, 1, [ // ctlz + SDTCisSameAs<0, 1>, SDTCisInt<0> +]>; +def SDTIntExtendOp : SDTypeProfile<1, 1, [ // sext, zext, anyext + SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0> +]>; +def SDTIntTruncOp : SDTypeProfile<1, 1, [ // trunc + SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<0, 1> +]>; +def SDTFPUnaryOp : SDTypeProfile<1, 1, [ // fneg, fsqrt, etc + SDTCisSameAs<0, 1>, SDTCisFP<0> +]>; +def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround + SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1> +]>; +def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend + SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0> +]>; +def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp + SDTCisFP<0>, SDTCisInt<1> +]>; +def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int + SDTCisInt<0>, SDTCisFP<1> +]>; +def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg + SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisVT<2, OtherVT>, + SDTCisVTSmallerThanOp<2, 1> +]>; + +def SDTSetCC : SDTypeProfile<1, 3, [ // setcc + SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT> +]>; + +def SDTSelect : SDTypeProfile<1, 3, [ // select + SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3> +]>; + +def SDTSelectCC : SDTypeProfile<1, 5, [ // select_cc + SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisSameAs<0, 3>, + SDTCisVT<5, OtherVT> +]>; + +def SDTBr : SDTypeProfile<0, 1, [ // br + SDTCisVT<0, OtherVT> +]>; + +def SDTBrcond : SDTypeProfile<0, 2, [ // brcond + SDTCisInt<0>, SDTCisVT<1, OtherVT> +]>; + +def SDTBrind : SDTypeProfile<0, 1, [ // brind + SDTCisPtrTy<0> +]>; + +def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap + +def SDTLoad : SDTypeProfile<1, 1, [ // load + SDTCisPtrTy<1> +]>; + +def SDTStore : SDTypeProfile<0, 2, [ // store + SDTCisPtrTy<1> +]>; + +def SDTIStore : SDTypeProfile<1, 3, [ // indexed store + SDTCisSameAs<0, 2>, SDTCisPtrTy<0>, SDTCisPtrTy<3> +]>; + +def SDTVecShuffle : SDTypeProfile<1, 2, [ + SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2> +]>; +def SDTVecExtract : SDTypeProfile<1, 2, [ // vector extract + SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2> +]>; +def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert + SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3> +]>; + +def STDPrefetch : SDTypeProfile<0, 3, [ // prefetch + SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisInt<1> +]>; + +def STDMemBarrier : SDTypeProfile<0, 5, [ // memory barier + SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>, + SDTCisInt<0> +]>; +def STDAtomic3 : SDTypeProfile<1, 3, [ + SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1> +]>; +def STDAtomic2 : SDTypeProfile<1, 2, [ + SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1> +]>; + +def SDTConvertOp : SDTypeProfile<1, 5, [ //cvtss, su, us, uu, ff, fs, fu, sf, su + SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>, SDTCisPtrTy<4>, SDTCisPtrTy<5> +]>; + +class SDCallSeqStart<list<SDTypeConstraint> constraints> : + SDTypeProfile<0, 1, constraints>; +class SDCallSeqEnd<list<SDTypeConstraint> constraints> : + SDTypeProfile<0, 2, constraints>; + +//===----------------------------------------------------------------------===// +// Selection DAG Node Properties. +// +// Note: These are hard coded into tblgen. 
+// +class SDNodeProperty; +def SDNPCommutative : SDNodeProperty; // X op Y == Y op X +def SDNPAssociative : SDNodeProperty; // (X op Y) op Z == X op (Y op Z) +def SDNPHasChain : SDNodeProperty; // R/W chain operand and result +def SDNPOutFlag : SDNodeProperty; // Write a flag result +def SDNPInFlag : SDNodeProperty; // Read a flag operand +def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand +def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'. +def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'. +def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'. +def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand + +//===----------------------------------------------------------------------===// +// Selection DAG Node definitions. +// +class SDNode<string opcode, SDTypeProfile typeprof, + list<SDNodeProperty> props = [], string sdclass = "SDNode"> { + string Opcode = opcode; + string SDClass = sdclass; + list<SDNodeProperty> Properties = props; + SDTypeProfile TypeProfile = typeprof; +} + +def set; +def implicit; +def parallel; +def node; +def srcvalue; + +def imm : SDNode<"ISD::Constant" , SDTIntLeaf , [], "ConstantSDNode">; +def timm : SDNode<"ISD::TargetConstant",SDTIntLeaf, [], "ConstantSDNode">; +def fpimm : SDNode<"ISD::ConstantFP", SDTFPLeaf , [], "ConstantFPSDNode">; +def vt : SDNode<"ISD::VALUETYPE" , SDTOther , [], "VTSDNode">; +def bb : SDNode<"ISD::BasicBlock", SDTOther , [], "BasicBlockSDNode">; +def cond : SDNode<"ISD::CONDCODE" , SDTOther , [], "CondCodeSDNode">; +def undef : SDNode<"ISD::UNDEF" , SDTUNDEF , []>; +def globaladdr : SDNode<"ISD::GlobalAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def tglobaladdr : SDNode<"ISD::TargetGlobalAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def globaltlsaddr : SDNode<"ISD::GlobalTLSAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def tglobaltlsaddr : SDNode<"ISD::TargetGlobalTLSAddress", SDTPtrLeaf, [], + "GlobalAddressSDNode">; +def constpool : SDNode<"ISD::ConstantPool", SDTPtrLeaf, [], + "ConstantPoolSDNode">; +def tconstpool : SDNode<"ISD::TargetConstantPool", SDTPtrLeaf, [], + "ConstantPoolSDNode">; +def jumptable : SDNode<"ISD::JumpTable", SDTPtrLeaf, [], + "JumpTableSDNode">; +def tjumptable : SDNode<"ISD::TargetJumpTable", SDTPtrLeaf, [], + "JumpTableSDNode">; +def frameindex : SDNode<"ISD::FrameIndex", SDTPtrLeaf, [], + "FrameIndexSDNode">; +def tframeindex : SDNode<"ISD::TargetFrameIndex", SDTPtrLeaf, [], + "FrameIndexSDNode">; +def externalsym : SDNode<"ISD::ExternalSymbol", SDTPtrLeaf, [], + "ExternalSymbolSDNode">; +def texternalsym: SDNode<"ISD::TargetExternalSymbol", SDTPtrLeaf, [], + "ExternalSymbolSDNode">; + +def add : SDNode<"ISD::ADD" , SDTIntBinOp , + [SDNPCommutative, SDNPAssociative]>; +def sub : SDNode<"ISD::SUB" , SDTIntBinOp>; +def mul : SDNode<"ISD::MUL" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def mulhs : SDNode<"ISD::MULHS" , SDTIntBinOp, [SDNPCommutative]>; +def mulhu : SDNode<"ISD::MULHU" , SDTIntBinOp, [SDNPCommutative]>; +def sdiv : SDNode<"ISD::SDIV" , SDTIntBinOp>; +def udiv : SDNode<"ISD::UDIV" , SDTIntBinOp>; +def srem : SDNode<"ISD::SREM" , SDTIntBinOp>; +def urem : SDNode<"ISD::UREM" , SDTIntBinOp>; +def srl : SDNode<"ISD::SRL" , SDTIntShiftOp>; +def sra : SDNode<"ISD::SRA" , SDTIntShiftOp>; +def shl : SDNode<"ISD::SHL" , SDTIntShiftOp>; +def rotl : SDNode<"ISD::ROTL" , SDTIntShiftOp>; +def rotr : SDNode<"ISD::ROTR" , SDTIntShiftOp>; +def and : SDNode<"ISD::AND" , 
SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def or : SDNode<"ISD::OR" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def xor : SDNode<"ISD::XOR" , SDTIntBinOp, + [SDNPCommutative, SDNPAssociative]>; +def addc : SDNode<"ISD::ADDC" , SDTIntBinOp, + [SDNPCommutative, SDNPOutFlag]>; +def adde : SDNode<"ISD::ADDE" , SDTIntBinOp, + [SDNPCommutative, SDNPOutFlag, SDNPInFlag]>; +def subc : SDNode<"ISD::SUBC" , SDTIntBinOp, + [SDNPOutFlag]>; +def sube : SDNode<"ISD::SUBE" , SDTIntBinOp, + [SDNPOutFlag, SDNPInFlag]>; + +def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>; +def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>; +def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>; +def cttz : SDNode<"ISD::CTTZ" , SDTIntUnaryOp>; +def ctpop : SDNode<"ISD::CTPOP" , SDTIntUnaryOp>; +def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>; +def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>; +def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>; +def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>; +def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>; +def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>; +def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>; + + +def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>; +def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>; +def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>; +def fdiv : SDNode<"ISD::FDIV" , SDTFPBinOp>; +def frem : SDNode<"ISD::FREM" , SDTFPBinOp>; +def fabs : SDNode<"ISD::FABS" , SDTFPUnaryOp>; +def fneg : SDNode<"ISD::FNEG" , SDTFPUnaryOp>; +def fsqrt : SDNode<"ISD::FSQRT" , SDTFPUnaryOp>; +def fsin : SDNode<"ISD::FSIN" , SDTFPUnaryOp>; +def fcos : SDNode<"ISD::FCOS" , SDTFPUnaryOp>; +def frint : SDNode<"ISD::FRINT" , SDTFPUnaryOp>; +def ftrunc : SDNode<"ISD::FTRUNC" , SDTFPUnaryOp>; +def fceil : SDNode<"ISD::FCEIL" , SDTFPUnaryOp>; +def ffloor : SDNode<"ISD::FFLOOR" , SDTFPUnaryOp>; +def fnearbyint : SDNode<"ISD::FNEARBYINT" , SDTFPUnaryOp>; + +def fround : SDNode<"ISD::FP_ROUND" , SDTFPRoundOp>; +def fextend : SDNode<"ISD::FP_EXTEND" , SDTFPExtendOp>; +def fcopysign : SDNode<"ISD::FCOPYSIGN" , SDTFPSignOp>; + +def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>; +def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>; +def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>; +def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>; + +def setcc : SDNode<"ISD::SETCC" , SDTSetCC>; +def select : SDNode<"ISD::SELECT" , SDTSelect>; +def selectcc : SDNode<"ISD::SELECT_CC" , SDTSelectCC>; +def vsetcc : SDNode<"ISD::VSETCC" , SDTSetCC>; + +def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>; +def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>; +def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>; +def ret : SDNode<"ISD::RET" , SDTNone, [SDNPHasChain]>; +def trap : SDNode<"ISD::TRAP" , SDTNone, + [SDNPHasChain, SDNPSideEffect]>; + +def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch, + [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>; + +def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier, + [SDNPHasChain, SDNPSideEffect]>; + +def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , 
STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; + +// Do not use ld, st directly. Use load, extload, sextload, zextload, store, +// and truncst (see below). +def ld : SDNode<"ISD::LOAD" , SDTLoad, + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def st : SDNode<"ISD::STORE" , SDTStore, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; +def ist : SDNode<"ISD::STORE" , SDTIStore, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; + +def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>; +def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, -1, []>, []>; +def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>, + []>; +def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT", + SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>; +def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT", + SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>; + +// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use +// these internally. Don't reference these directly. +def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID", + SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>, + [SDNPHasChain]>; +def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN", + SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, + [SDNPHasChain]>; +def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN", + SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>; + +// Do not use cvt directly. Use cvt forms below +def cvt : SDNode<"ISD::CONVERT_RNDSAT", SDTConvertOp>; + +//===----------------------------------------------------------------------===// +// Selection DAG Condition Codes + +class CondCode; // ISD::CondCode enums +def SETOEQ : CondCode; def SETOGT : CondCode; +def SETOGE : CondCode; def SETOLT : CondCode; def SETOLE : CondCode; +def SETONE : CondCode; def SETO : CondCode; def SETUO : CondCode; +def SETUEQ : CondCode; def SETUGT : CondCode; def SETUGE : CondCode; +def SETULT : CondCode; def SETULE : CondCode; def SETUNE : CondCode; + +def SETEQ : CondCode; def SETGT : CondCode; def SETGE : CondCode; +def SETLT : CondCode; def SETLE : CondCode; def SETNE : CondCode; + + +//===----------------------------------------------------------------------===// +// Selection DAG Node Transformation Functions. +// +// This mechanism allows targets to manipulate nodes in the output DAG once a +// match has been formed. This is typically used to manipulate immediate +// values. 
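+//
+// As a hedged illustration (editorial sketch; LO16 and getI32Imm are
+// hypothetical target-side names, not definitions from this file), a target
+// might extract the low 16 bits of a matched immediate like this:
+//
+//   def LO16 : SDNodeXForm<imm, [{
+//     return getI32Imm((unsigned)N->getZExtValue() & 0xFFFF);
+//   }]>;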
+// +class SDNodeXForm<SDNode opc, code xformFunction> { + SDNode Opcode = opc; + code XFormFunction = xformFunction; +} + +def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>; + + +//===----------------------------------------------------------------------===// +// Selection DAG Pattern Fragments. +// +// Pattern fragments are reusable chunks of dags that match specific things. +// They can take arguments and have C++ predicates that control whether they +// match. They are intended to make the patterns for common instructions more +// compact and readable. +// + +/// PatFrag - Represents a pattern fragment. This can match something on the +/// DAG, frame a single node to multiply nested other fragments. +/// +class PatFrag<dag ops, dag frag, code pred = [{}], + SDNodeXForm xform = NOOP_SDNodeXForm> { + dag Operands = ops; + dag Fragment = frag; + code Predicate = pred; + SDNodeXForm OperandTransform = xform; +} + +// PatLeaf's are pattern fragments that have no operands. This is just a helper +// to define immediates and other common things concisely. +class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm> + : PatFrag<(ops), frag, pred, xform>; + +// Leaf fragments. + +def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>; +def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>; + +def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>; +def immAllOnesV: PatLeaf<(build_vector), [{ + return ISD::isBuildVectorAllOnes(N); +}]>; +def immAllOnesV_bc: PatLeaf<(bitconvert), [{ + return ISD::isBuildVectorAllOnes(N); +}]>; +def immAllZerosV: PatLeaf<(build_vector), [{ + return ISD::isBuildVectorAllZeros(N); +}]>; +def immAllZerosV_bc: PatLeaf<(bitconvert), [{ + return ISD::isBuildVectorAllZeros(N); +}]>; + + + +// Other helper fragments. +def not : PatFrag<(ops node:$in), (xor node:$in, immAllOnes)>; +def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>; +def vnot_conv : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV_bc)>; +def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>; + +// load fragments. +def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{ + return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; +}]>; +def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD; +}]>; + +// extending load fragments. 
+def extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD; +}]>; +def sextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD; +}]>; +def zextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{ + return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD; +}]>; + +def extloadi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; +def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64; +}]>; + +def sextloadi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def sextloadi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + +def zextloadi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def zextloadi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; + +// store fragments. +def unindexedstore : PatFrag<(ops node:$val, node:$ptr), + (st node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; +}]>; +def store : PatFrag<(ops node:$val, node:$ptr), + (unindexedstore node:$val, node:$ptr), [{ + return !cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; + +// truncstore fragments. 
+def truncstore : PatFrag<(ops node:$val, node:$ptr), + (unindexedstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; +def truncstorei8 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def truncstorei16 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def truncstorei32 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def truncstoref32 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; +def truncstoref64 : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64; +}]>; + +// indexed store fragments. +def istore : PatFrag<(ops node:$val, node:$base, node:$offset), + (ist node:$val, node:$base, node:$offset), [{ + return !cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; + +def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset), + (istore node:$val, node:$base, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::PRE_INC || AM == ISD::PRE_DEC; +}]>; + +def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset), + (ist node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->isTruncatingStore(); +}]>; +def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset), + (itruncstore node:$val, node:$base, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::PRE_INC || AM == ISD::PRE_DEC; +}]>; +def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (pre_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; + +def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset), + (istore node:$val, node:$ptr, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::POST_INC || AM == ISD::POST_DEC; +}]>; + +def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset), + (itruncstore node:$val, node:$base, node:$offset), [{ + ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode(); + return AM == ISD::POST_INC || AM == ISD::POST_DEC; +}]>; +def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1; +}]>; +def post_truncsti8 : PatFrag<(ops node:$val, 
node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), + (post_truncst node:$val, node:$base, node:$offset), [{ + return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32; +}]>; + +// setcc convenience fragments. +def setoeq : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOEQ)>; +def setogt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOGT)>; +def setoge : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOGE)>; +def setolt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOLT)>; +def setole : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETOLE)>; +def setone : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETONE)>; +def seto : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETO)>; +def setuo : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUO)>; +def setueq : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUEQ)>; +def setugt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUGT)>; +def setuge : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUGE)>; +def setult : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETULT)>; +def setule : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETULE)>; +def setune : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETUNE)>; +def seteq : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETEQ)>; +def setgt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETGT)>; +def setge : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETGE)>; +def setlt : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETLT)>; +def setle : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETLE)>; +def setne : PatFrag<(ops node:$lhs, node:$rhs), + (setcc node:$lhs, node:$rhs, SETNE)>; + +def atomic_cmp_swap_8 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8; +}]>; +def atomic_cmp_swap_16 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16; +}]>; +def atomic_cmp_swap_32 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32; +}]>; +def atomic_cmp_swap_64 : + PatFrag<(ops node:$ptr, node:$cmp, node:$swap), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64; +}]>; + +multiclass binary_atomic_op<SDNode atomic_op> { + def _8 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == 
MVT::i8; + }]>; + def _16 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16; + }]>; + def _32 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32; + }]>; + def _64 : PatFrag<(ops node:$ptr, node:$val), + (atomic_op node:$ptr, node:$val), [{ + return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64; + }]>; +} + +defm atomic_load_add : binary_atomic_op<atomic_load_add>; +defm atomic_swap : binary_atomic_op<atomic_swap>; +defm atomic_load_sub : binary_atomic_op<atomic_load_sub>; +defm atomic_load_and : binary_atomic_op<atomic_load_and>; +defm atomic_load_or : binary_atomic_op<atomic_load_or>; +defm atomic_load_xor : binary_atomic_op<atomic_load_xor>; +defm atomic_load_nand : binary_atomic_op<atomic_load_nand>; +defm atomic_load_min : binary_atomic_op<atomic_load_min>; +defm atomic_load_max : binary_atomic_op<atomic_load_max>; +defm atomic_load_umin : binary_atomic_op<atomic_load_umin>; +defm atomic_load_umax : binary_atomic_op<atomic_load_umax>; + +//===----------------------------------------------------------------------===// +// Selection DAG CONVERT_RNDSAT patterns + +def cvtff : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF; + }]>; + +def cvtss : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS; + }]>; + +def cvtsu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU; + }]>; + +def cvtus : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US; + }]>; + +def cvtuu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU; + }]>; + +def cvtsf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF; + }]>; + +def cvtuf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF; + }]>; + +def cvtfs : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS; + }]>; + +def cvtfu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat), + (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{ + return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FU; + }]>; + +//===----------------------------------------------------------------------===// +// Selection DAG Pattern Support. +// +// Patterns are what are actually matched against the target-flavored +// instruction selection DAG. 
Instructions defined by the target implicitly +// define patterns in most cases, but patterns can also be explicitly added when +// an operation is defined by a sequence of instructions (e.g. loading a large +// immediate value on RISC targets that do not support immediates as large as +// their GPRs). +// + +class Pattern<dag patternToMatch, list<dag> resultInstrs> { + dag PatternToMatch = patternToMatch; + list<dag> ResultInstrs = resultInstrs; + list<Predicate> Predicates = []; // See class Instruction in Target.td. + int AddedComplexity = 0; // See class Instruction in Target.td. +} + +// Pat - A simple (but common) form of a pattern, which produces a simple result +// not needing a full list. +class Pat<dag pattern, dag result> : Pattern<pattern, [result]>; + +//===----------------------------------------------------------------------===// +// Complex pattern definitions. +// + +class CPAttribute; +// Pass the parent Operand as root to CP function rather +// than the root of the sub-DAG +def CPAttrParentAsRoot : CPAttribute; + +// Complex patterns, e.g. X86 addressing mode, requires pattern matching code +// in C++. NumOperands is the number of operands returned by the select function; +// SelectFunc is the name of the function used to pattern match the max. pattern; +// RootNodes are the list of possible root nodes of the sub-dags to match. +// e.g. X86 addressing mode - def addr : ComplexPattern<4, "SelectAddr", [add]>; +// +class ComplexPattern<ValueType ty, int numops, string fn, + list<SDNode> roots = [], list<SDNodeProperty> props = [], + list<CPAttribute> attrs = []> { + ValueType Ty = ty; + int NumOperands = numops; + string SelectFunc = fn; + list<SDNode> RootNodes = roots; + list<SDNodeProperty> Properties = props; + list<CPAttribute> Attributes = attrs; +} + +//===----------------------------------------------------------------------===// +// Dwarf support. +// +def SDT_dwarf_loc : SDTypeProfile<0, 3, + [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>; +def dwarf_loc : SDNode<"ISD::DEBUG_LOC", SDT_dwarf_loc,[SDNPHasChain]>; diff --git a/include/llvm/Target/TargetSubtarget.h b/include/llvm/Target/TargetSubtarget.h new file mode 100644 index 0000000..eca45eb --- /dev/null +++ b/include/llvm/Target/TargetSubtarget.h @@ -0,0 +1,42 @@ +//==-- llvm/Target/TargetSubtarget.h - Target Information --------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the subtarget options of a Target machine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETSUBTARGET_H +#define LLVM_TARGET_TARGETSUBTARGET_H + +namespace llvm { + +//===----------------------------------------------------------------------===// +/// +/// TargetSubtarget - Generic base class for all target subtargets. All +/// Target-specific options that control code generation and printing should +/// be exposed through a TargetSubtarget-derived class. +/// +class TargetSubtarget { + TargetSubtarget(const TargetSubtarget&); // DO NOT IMPLEMENT + void operator=(const TargetSubtarget&); // DO NOT IMPLEMENT +protected: // Can only create subclasses... 
+ TargetSubtarget(); +public: + virtual ~TargetSubtarget(); + + /// getSpecialAddressLatency - For targets where it is beneficial to + /// backschedule instructions that compute addresses, return a value + /// indicating the number of scheduling cycles of backscheduling that + /// should be attempted. + virtual unsigned getSpecialAddressLatency() const { return 0; } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h new file mode 100644 index 0000000..4372ea0 --- /dev/null +++ b/include/llvm/Transforms/IPO.h @@ -0,0 +1,219 @@ +//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes for accessor functions that expose passes +// in the IPO transformations library. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_IPO_H +#define LLVM_TRANSFORMS_IPO_H + +#include <vector> + +namespace llvm { + +class FunctionPass; +class ModulePass; +class Pass; +class Function; +class BasicBlock; +class GlobalValue; + +//===----------------------------------------------------------------------===// +// +// This pass removes symbols from functions and modules. If OnlyDebugInfo +// is true, only debugging information is removed from the module. +// +ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false); + +//===----------------------------------------------------------------------===// +// +// This pass strips symbols from functions and modules; +// only debugging information is left in place. +// +ModulePass *createStripNonDebugSymbolsPass(); + +//===----------------------------------------------------------------------===// +// +// This pass removes llvm.dbg.declare intrinsics. +ModulePass *createStripDebugDeclarePass(); + +//===----------------------------------------------------------------------===// +/// createLowerSetJmpPass - This function lowers the setjmp/longjmp intrinsics +/// to invoke/unwind instructions. This should really be part of the C/C++ +/// front-end, but it's so much easier to write transformations in LLVM proper. +/// +ModulePass *createLowerSetJmpPass(); + +//===----------------------------------------------------------------------===// +/// createConstantMergePass - This function returns a new pass that merges +/// duplicate global constants together into a single constant that is shared. +/// This is useful because some passes (i.e. TraceValues) insert a lot of string +/// constants into the program, regardless of whether or not they duplicate an +/// existing string. +/// +ModulePass *createConstantMergePass(); + + +//===----------------------------------------------------------------------===// +/// createGlobalOptimizerPass - This function returns a new pass that optimizes +/// non-address-taken internal globals. +/// +ModulePass *createGlobalOptimizerPass(); + + +//===----------------------------------------------------------------------===// +/// createRaiseAllocationsPass - Return a new pass that transforms malloc and +/// free function calls into malloc and free instructions.
+/// +ModulePass *createRaiseAllocationsPass(); + + +//===----------------------------------------------------------------------===// +/// createDeadTypeEliminationPass - Return a new pass that eliminates symbol +/// table entries for types that are never used. +/// +ModulePass *createDeadTypeEliminationPass(); + + +//===----------------------------------------------------------------------===// +/// createGlobalDCEPass - This transform is designed to eliminate unreachable +/// internal globals (functions or global variables) +/// +ModulePass *createGlobalDCEPass(); + + +//===----------------------------------------------------------------------===// +/// createGVExtractionPass - If deleteFn is true, this pass deletes as +/// the specified global values. Otherwise, it deletes as much of the module as +/// possible, except for the global values specified. +/// +ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool + deleteFn = false, + bool relinkCallees = false); + +//===----------------------------------------------------------------------===// +/// createFunctionInliningPass - Return a new pass object that uses a heuristic +/// to inline direct function calls to small functions. +/// +Pass *createFunctionInliningPass(); +Pass *createFunctionInliningPass(int Threshold); + +//===----------------------------------------------------------------------===// +/// createAlwaysInlinerPass - Return a new pass object that inlines only +/// functions that are marked as "always_inline". +Pass *createAlwaysInlinerPass(); + +//===----------------------------------------------------------------------===// +/// createPruneEHPass - Return a new pass object which transforms invoke +/// instructions into calls, if the callee can _not_ unwind the stack. +/// +Pass *createPruneEHPass(); + +//===----------------------------------------------------------------------===// +/// createInternalizePass - This pass loops over all of the functions in the +/// input module, internalizing all globals (functions and variables) not part +/// of the api. If a list of symbols is specified with the +/// -internalize-public-api-* command line options, those symbols are not +/// internalized and all others are. Otherwise if AllButMain is set and the +/// main function is found, all other globals are marked as internal. If no api +/// is supplied and AllButMain is not set, or no main function is found, nothing +/// is internalized. +/// +ModulePass *createInternalizePass(bool AllButMain); + +/// createInternalizePass - This pass loops over all of the functions in the +/// input module, internalizing all globals (functions and variables) not in the +/// given exportList. +/// +/// Note that commandline options that are used with the above function are not +/// used now! Also, when exportList is empty, nothing is internalized. +ModulePass *createInternalizePass(const std::vector<const char *> &exportList); + +//===----------------------------------------------------------------------===// +/// createDeadArgEliminationPass - This pass removes arguments from functions +/// which are not used by the body of the function. +/// +ModulePass *createDeadArgEliminationPass(); + +/// DeadArgHacking pass - Same as DAE, but delete arguments of external +/// functions as well. This is definitely not safe, and should only be used by +/// bugpoint. 
+ModulePass *createDeadArgHackingPass(); + +//===----------------------------------------------------------------------===// +/// createArgumentPromotionPass - This pass promotes "by reference" arguments to +/// be passed by value if the number of elements passed is smaller or +/// equal to maxElements (maxElements == 0 means always promote). +/// +Pass *createArgumentPromotionPass(unsigned maxElements = 3); +Pass *createStructRetPromotionPass(); + +//===----------------------------------------------------------------------===// +/// createIPConstantPropagationPass - This pass propagates constants from call +/// sites into the bodies of functions. +/// +ModulePass *createIPConstantPropagationPass(); + +//===----------------------------------------------------------------------===// +/// createIPSCCPPass - This pass propagates constants from call sites into the +/// bodies of functions, and keeps track of whether basic blocks are executable +/// in the process. +/// +ModulePass *createIPSCCPPass(); + +//===----------------------------------------------------------------------===// +// +/// createLoopExtractorPass - This pass extracts all natural loops from the +/// program into a function if it can. +/// +FunctionPass *createLoopExtractorPass(); + +/// createSingleLoopExtractorPass - This pass extracts one natural loop from the +/// program into a function if it can. This is used by bugpoint. +/// +FunctionPass *createSingleLoopExtractorPass(); + +/// createBlockExtractorPass - This pass extracts all blocks (except those +/// specified in the argument list) from the functions in the module. +/// +ModulePass *createBlockExtractorPass(const std::vector<BasicBlock*> &BTNE); + +/// createIndMemRemPass - This pass removes potential indirect calls of +/// malloc and free +ModulePass *createIndMemRemPass(); + +/// createStripDeadPrototypesPass - This pass removes any function declarations +/// (prototypes) that are not used. +ModulePass *createStripDeadPrototypesPass(); + +//===----------------------------------------------------------------------===// +/// createPartialSpecializationPass - This pass specializes functions for +/// constant arguments. +/// +ModulePass *createPartialSpecializationPass(); + +//===----------------------------------------------------------------------===// +/// createFunctionAttrsPass - This pass discovers functions that do not access +/// memory, or only read memory, and gives them the readnone/readonly attribute. +/// It also discovers function arguments that are not captured by the function +/// and marks them with the nocapture attribute. +/// +Pass *createFunctionAttrsPass(); + +//===----------------------------------------------------------------------===// +/// createMergeFunctionsPass - This pass discovers identical functions and +/// collapses them. +/// +ModulePass *createMergeFunctionsPass(); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/IPO/InlinerPass.h b/include/llvm/Transforms/IPO/InlinerPass.h new file mode 100644 index 0000000..b370e96 --- /dev/null +++ b/include/llvm/Transforms/IPO/InlinerPass.h @@ -0,0 +1,90 @@ +//===- InlinerPass.h - Code common to all inliners --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a simple policy-based bottom-up inliner. 
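Before the inliner internals below, here is a rough sketch of how the creator functions from IPO.h above are commonly strung together. It is illustrative only: PassManager and Module are assumed to come from LLVM headers outside this patch, and the particular passes and ordering are not prescribed by the header.

#include "llvm/PassManager.h"   // assumed header locations; not part of this patch
#include "llvm/Module.h"
#include "llvm/Transforms/IPO.h"

using namespace llvm;

// Run a small interprocedural pipeline over a module.
void runIPOPipeline(Module &M) {
  PassManager PM;
  PM.add(createInternalizePass(true));       // internalize everything but main
  PM.add(createGlobalOptimizerPass());       // optimize non-address-taken globals
  PM.add(createFunctionInliningPass(200));   // heuristic inlining with a threshold
  PM.add(createDeadArgEliminationPass());    // drop unused formal arguments
  PM.add(createGlobalDCEPass());             // remove now-unreachable globals
  PM.run(M);
}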
This file +// implements all of the boring mechanics of the bottom-up inlining, while the +// subclass determines WHAT to inline, which is the much more interesting +// component. +// +//===----------------------------------------------------------------------===// + +#ifndef INLINER_H +#define INLINER_H + +#include "llvm/CallGraphSCCPass.h" +#include "llvm/Transforms/Utils/InlineCost.h" +#include "llvm/Target/TargetData.h" + + +namespace llvm { + class CallSite; + +/// Inliner - This class contains all of the helper code which is used to +/// perform the inlining operations that do not depend on the policy. +/// +struct Inliner : public CallGraphSCCPass { + explicit Inliner(void *ID); + explicit Inliner(void *ID, int Threshold); + + /// getAnalysisUsage - For this class, we declare that we require and preserve + /// the call graph. If the derived class implements this method, it should + /// always explicitly call the implementation here. + virtual void getAnalysisUsage(AnalysisUsage &Info) const; + + // Main run interface method, this implements the interface required by the + // Pass class. + virtual bool runOnSCC(const std::vector<CallGraphNode *> &SCC); + + // doFinalization - Remove now-dead linkonce functions at the end of + // processing to avoid breaking the SCC traversal. + virtual bool doFinalization(CallGraph &CG); + + // InlineCallIfPossible + bool InlineCallIfPossible(CallSite CS, CallGraph &CG, + const SmallPtrSet<Function*, 8> &SCCFunctions, + const TargetData &TD); + + /// This method returns the value specified by the -inline-threshold value, + /// specified on the command line. This is typically not directly needed. + /// + unsigned getInlineThreshold() const { return InlineThreshold; } + + /// getInlineCost - This method must be implemented by the subclass to + /// determine the cost of inlining the specified call site. If the cost + /// returned is greater than the current inline threshold, the call site is + /// not inlined. + /// + virtual InlineCost getInlineCost(CallSite CS) = 0; + + // getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a + // higher threshold to determine if the function call should be inlined. + /// + virtual float getInlineFudgeFactor(CallSite CS) = 0; + + /// resetCachedCostInfo - erase any cached cost data from the derived class. + /// If the derived class has no such data this can be empty. + /// + virtual void resetCachedCostInfo(Function* Caller) = 0; + + /// removeDeadFunctions - Remove dead functions that are not included in + /// DNR (Do Not Remove) list. + bool removeDeadFunctions(CallGraph &CG, + SmallPtrSet<const Function *, 16> *DNR = NULL); +private: + // InlineThreshold - Cache the value here for easy access. + unsigned InlineThreshold; + + /// shouldInline - Return true if the inliner should attempt to + /// inline at the given CallSite. + bool shouldInline(CallSite CS); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h new file mode 100644 index 0000000..698e248 --- /dev/null +++ b/include/llvm/Transforms/Instrumentation.h @@ -0,0 +1,37 @@ +//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
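The mechanism/policy split that the Inliner class above encodes can be made concrete with a minimal subclass. This is only a sketch: the pass is hypothetical and unregistered, the always-inline policy is invented for illustration, and the full CallSite definition is assumed to live in llvm/Support/CallSite.h.

#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Support/CallSite.h"      // assumed location of CallSite

using namespace llvm;

namespace {
  // A toy policy: accept every call site. A realistic policy would consult
  // InlineCostAnalyzer (declared later in this patch) instead.
  struct InlineEverything : public Inliner {
    static char ID;
    InlineEverything() : Inliner(&ID) {}

    virtual InlineCost getInlineCost(CallSite CS) {
      return InlineCost::getAlways();   // the policy decision
    }
    virtual float getInlineFudgeFactor(CallSite CS) { return 1.0f; }
    virtual void resetCachedCostInfo(Function *Caller) {}
  };
  char InlineEverything::ID = 0;
}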
+// +//===----------------------------------------------------------------------===// +// +// This file defines constructor functions for instrumentation passes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H +#define LLVM_TRANSFORMS_INSTRUMENTATION_H + +namespace llvm { + +class ModulePass; +class FunctionPass; + +// Insert function profiling instrumentation +ModulePass *createFunctionProfilerPass(); + +// Insert block profiling instrumentation +ModulePass *createBlockProfilerPass(); + +// Insert edge profiling instrumentation +ModulePass *createEdgeProfilerPass(); + +// Random Sampling Profiling Framework +ModulePass* createNullProfilerRSPass(); +FunctionPass* createRSProfilingPass(); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/RSProfiling.h b/include/llvm/Transforms/RSProfiling.h new file mode 100644 index 0000000..98ec396 --- /dev/null +++ b/include/llvm/Transforms/RSProfiling.h @@ -0,0 +1,38 @@ +//===- RSProfiling.h - Various profiling using random sampling ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the abstract interface that a profiler must implement to +// support the random profiling transform. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_RSPROFILING_H +#define LLVM_TRANSFORMS_RSPROFILING_H + +namespace llvm { + //===--------------------------------------------------------------------===// + /// RSProfilers - The basic Random Sampling Profiler Interface. Any profiler + /// that implements this interface can be transformed by the random sampling + /// pass to be sample based rather than always on. + /// + /// The only exposed function can be queried to find out if an instruction + /// was original or if it was inserted by the profiler. Implementations of + /// this interface are expected to chain to other implementations, such that + /// multiple profilers can be supported simultaneously. + struct RSProfilers : public ModulePass { + static char ID; // Pass identification, replacement for typeinfo + RSProfilers() : ModulePass(&ID) {} + + /// isProfiling - This method returns true if the value passed to it was + /// inserted by the profiler. + virtual bool isProfiling(Value* v) = 0; + }; +} + +#endif diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h new file mode 100644 index 0000000..2c3fdd4 --- /dev/null +++ b/include/llvm/Transforms/Scalar.h @@ -0,0 +1,342 @@ +//===-- Scalar.h - Scalar Transformations -----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes for accessor functions that expose passes +// in the Scalar transformations library.
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_SCALAR_H +#define LLVM_TRANSFORMS_SCALAR_H + +namespace llvm { + +class FunctionPass; +class Pass; +class GetElementPtrInst; +class PassInfo; +class TerminatorInst; +class TargetLowering; + +//===----------------------------------------------------------------------===// +// +// ConstantPropagation - A worklist driven constant propagation pass +// +FunctionPass *createConstantPropagationPass(); + +//===----------------------------------------------------------------------===// +// +// SCCP - Sparse conditional constant propagation. +// +FunctionPass *createSCCPPass(); + +//===----------------------------------------------------------------------===// +// +// DeadInstElimination - This pass quickly removes trivially dead instructions +// without modifying the CFG of the function. It is a BasicBlockPass, so it +// runs efficiently when queued next to other BasicBlockPass's. +// +Pass *createDeadInstEliminationPass(); + +//===----------------------------------------------------------------------===// +// +// DeadCodeElimination - This pass is more powerful than DeadInstElimination, +// because it is worklist driven and can revisit instructions when the +// instructions that use them become dead, eliminating chains of dead +// computations. +// +FunctionPass *createDeadCodeEliminationPass(); + +//===----------------------------------------------------------------------===// +// +// DeadStoreElimination - This pass deletes stores that are post-dominated by +// must-aliased stores and are not loaded or used between the stores. +// +FunctionPass *createDeadStoreEliminationPass(); + +//===----------------------------------------------------------------------===// +// +// AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm. This +// algorithm assumes instructions are dead until proven otherwise, which makes +// it more successful at removing non-obviously dead instructions. +// +FunctionPass *createAggressiveDCEPass(); + +//===----------------------------------------------------------------------===// +// +// ScalarReplAggregates - Break up alloca's of aggregates into multiple allocas +// if possible. +// +FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1); + +//===----------------------------------------------------------------------===// +// +// InductionVariableSimplify - Transform induction variables in a program to all +// use a single canonical induction variable per loop. +// +Pass *createIndVarSimplifyPass(); + +//===----------------------------------------------------------------------===// +// +// InstructionCombining - Combine instructions to form fewer, simple +// instructions. This pass does not modify the CFG, and has a tendency to make +// instructions dead, so a subsequent DCE pass is useful. +// +// This pass combines things like: +// %Y = add int 1, %X +// %Z = add int 1, %Y +// into: +// %Z = add int 2, %X +// +FunctionPass *createInstructionCombiningPass(); + +//===----------------------------------------------------------------------===// +// +// LICM - This pass is a loop invariant code motion and memory promotion pass. +// +Pass *createLICMPass(); + +//===----------------------------------------------------------------------===// +// +// LoopStrengthReduce - This pass strength-reduces GEP instructions that use +// a loop's canonical induction variable as one of their indices.
It takes an +// optional parameter used to consult the target machine about whether certain +// transformations are profitable. +// +Pass *createLoopStrengthReducePass(const TargetLowering *TLI = 0); + +//===----------------------------------------------------------------------===// +// +// LoopUnswitch - This pass is a simple loop unswitching pass. +// +Pass *createLoopUnswitchPass(bool OptimizeForSize = false); + +//===----------------------------------------------------------------------===// +// +// LoopUnroll - This pass is a simple loop unrolling pass. +// +Pass *createLoopUnrollPass(); + +//===----------------------------------------------------------------------===// +// +// LoopRotate - This pass is a simple loop rotating pass. +// +Pass *createLoopRotatePass(); + +//===----------------------------------------------------------------------===// +// +// LoopIndexSplit - This pass divides a loop's iteration range by splitting the +// loop such that each resulting loop is executed efficiently. +// +Pass *createLoopIndexSplitPass(); + +//===----------------------------------------------------------------------===// +// +// PromoteMemoryToRegister - This pass is used to promote memory references to +// be register references. A simple example of the transformation performed by +// this pass is: +// +// FROM CODE TO CODE +// %X = alloca int, uint 1 ret int 42 +// store int 42, int *%X +// %Y = load int* %X +// ret int %Y +// +FunctionPass *createPromoteMemoryToRegisterPass(); +extern const PassInfo *const PromoteMemoryToRegisterID; + +//===----------------------------------------------------------------------===// +// +// DemoteRegisterToMemoryPass - This pass is used to demote registers to memory +// references. It basically undoes the PromoteMemoryToRegister pass to make CFG +// hacking easier. +// +FunctionPass *createDemoteRegisterToMemoryPass(); +extern const PassInfo *const DemoteRegisterToMemoryID; + +//===----------------------------------------------------------------------===// +// +// Reassociate - This pass reassociates commutative expressions in an order that +// is designed to promote better constant propagation, GCSE, LICM, PRE... +// +// For example: 4 + (x + 5) -> x + (4 + 5) +// +FunctionPass *createReassociatePass(); + +//===----------------------------------------------------------------------===// +// +// CondPropagationPass - This pass propagates information about conditional +// expressions through the program, allowing it to eliminate conditional +// branches in some cases. +// +FunctionPass *createCondPropagationPass(); + +//===----------------------------------------------------------------------===// +// +// TailDuplication - Eliminate unconditional branches through controlled code +// duplication, creating simpler CFG structures. +// +FunctionPass *createTailDuplicationPass(); + +//===----------------------------------------------------------------------===// +// +// JumpThreading - Thread control through multi-pred/multi-succ blocks where some +// preds always go to some succ. +// +FunctionPass *createJumpThreadingPass(); + +//===----------------------------------------------------------------------===// +// +// CFGSimplification - Merge basic blocks, eliminate unreachable blocks, +// simplify terminator instructions, etc... +// +FunctionPass *createCFGSimplificationPass(); + +//===----------------------------------------------------------------------===// +// +// BreakCriticalEdges - Break all of the critical edges in the CFG by inserting +// a dummy basic block.
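Taken together, the scalar pass creators above are typically composed into a pipeline such as the following sketch. PassManager and Module are assumed from headers outside this patch, and the ordering shown is illustrative rather than canonical.

#include "llvm/PassManager.h"   // assumed header locations; not part of this patch
#include "llvm/Module.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

// A per-function cleanup pipeline built from the creators above.
void runScalarOpts(Module &M) {
  PassManager PM;
  PM.add(createPromoteMemoryToRegisterPass()); // allocas -> SSA registers
  PM.add(createInstructionCombiningPass());    // peephole combining
  PM.add(createReassociatePass());             // expose constants to later passes
  PM.add(createLICMPass());                    // hoist loop-invariant code
  PM.add(createDeadCodeEliminationPass());     // delete newly dead instructions
  PM.add(createCFGSimplificationPass());       // merge and simplify blocks
  PM.run(M);
}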
This pass may be "required" by passes that cannot deal +// with critical edges. For this usage, a pass must call: +// +// AU.addRequiredID(BreakCriticalEdgesID); +// +// This pass obviously invalidates the CFG, but can update forward dominator +// (set, immediate dominators, tree, and frontier) information. +// +FunctionPass *createBreakCriticalEdgesPass(); +extern const PassInfo *const BreakCriticalEdgesID; + +//===----------------------------------------------------------------------===// +// +// LoopSimplify - Insert Pre-header blocks into the CFG for every function in +// the module. This pass updates dominator information, loop information, and +// does not add critical edges to the CFG. +// +// AU.addRequiredID(LoopSimplifyID); +// +FunctionPass *createLoopSimplifyPass(); +extern const PassInfo *const LoopSimplifyID; + +//===----------------------------------------------------------------------===// +// +// LowerAllocations - Turn malloc and free instructions into %malloc and %free +// calls. +// +// AU.addRequiredID(LowerAllocationsID); +// +Pass *createLowerAllocationsPass(bool LowerMallocArgToInteger = false); +extern const PassInfo *const LowerAllocationsID; + +//===----------------------------------------------------------------------===// +// +// TailCallElimination - This pass eliminates call instructions to the current +// function which occur immediately before return instructions. +// +FunctionPass *createTailCallEliminationPass(); + +//===----------------------------------------------------------------------===// +// +// LowerSwitch - This pass converts SwitchInst instructions into a sequence of +// chained binary branch instructions. +// +FunctionPass *createLowerSwitchPass(); +extern const PassInfo *const LowerSwitchID; + +//===----------------------------------------------------------------------===// +// +// LowerInvoke - This pass converts invoke and unwind instructions to use sjlj +// exception handling mechanisms. Note that after this pass runs the CFG is not +// entirely accurate (exceptional control flow edges are not correct anymore) so +// only very simple things should be done after the lowerinvoke pass has run +// (like generation of native code). This should *NOT* be used as a general +// purpose "my LLVM-to-LLVM pass doesn't support the invoke instruction yet" +// lowering pass. +// +FunctionPass *createLowerInvokePass(const TargetLowering *TLI = 0); +extern const PassInfo *const LowerInvokePassID; + +//===----------------------------------------------------------------------===// +// +// BlockPlacement - This pass reorders basic blocks in order to increase the +// number of fall-through conditional branches. +// +FunctionPass *createBlockPlacementPass(); + +//===----------------------------------------------------------------------===// +// +// LCSSA - This pass inserts phi nodes at loop boundaries to simplify other loop +// optimizations. +// +Pass *createLCSSAPass(); +extern const PassInfo *const LCSSAID; + +//===----------------------------------------------------------------------===// +// +// PredicateSimplifier - This pass collapses duplicate variables into one +// canonical form, and tries to simplify expressions along the way. +// +FunctionPass *createPredicateSimplifierPass(); + +//===----------------------------------------------------------------------===// +// +// GVN-PRE - This pass performs global value numbering and partial redundancy +// elimination. 
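The AU.addRequiredID(BreakCriticalEdgesID) idiom described above would appear in a client pass roughly as follows; the pass itself is hypothetical and its registration is omitted.

#include "llvm/Pass.h"                  // assumed location of FunctionPass/AnalysisUsage
#include "llvm/Transforms/Scalar.h"     // declares BreakCriticalEdgesID

using namespace llvm;

namespace {
  struct NeedsNoCriticalEdges : public FunctionPass {
    static char ID;
    NeedsNoCriticalEdges() : FunctionPass(&ID) {}

    // Ask the pass manager to run BreakCriticalEdges before this pass.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequiredID(BreakCriticalEdgesID);
    }

    virtual bool runOnFunction(Function &F) {
      // ... a transformation that may assume no critical edges ...
      return false;
    }
  };
  char NeedsNoCriticalEdges::ID = 0;
}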
+// +FunctionPass *createGVNPREPass(); + +//===----------------------------------------------------------------------===// +// +// GVN - This pass performs global value numbering and redundant load +// elimination cotemporaneously. +// +FunctionPass *createGVNPass(); + +//===----------------------------------------------------------------------===// +// +// MemCpyOpt - This pass performs optimizations related to eliminating memcpy +// calls and/or combining multiple stores into memset's. +// +FunctionPass *createMemCpyOptPass(); + +//===----------------------------------------------------------------------===// +// +// LoopDeletion - This pass performs DCE of non-infinite loops that it +// can prove are dead. +// +Pass *createLoopDeletionPass(); + +//===----------------------------------------------------------------------===// +// +/// createSimplifyLibCallsPass - This pass optimizes specific calls to +/// specific well-known (library) functions. +FunctionPass *createSimplifyLibCallsPass(); + +//===----------------------------------------------------------------------===// +// +/// createSimplifyHalfPowrLibCallsPass - This is an experimental pass that +/// optimizes specific half_pow functions. +FunctionPass *createSimplifyHalfPowrLibCallsPass(); + +//===----------------------------------------------------------------------===// +// +// CodeGenPrepare - This pass prepares a function for instruction selection. +// +FunctionPass *createCodeGenPreparePass(const TargetLowering *TLI = 0); + + +//===----------------------------------------------------------------------===// +// +// InstructionNamer - Give any unnamed non-void instructions "tmp" names. +// +FunctionPass *createInstructionNamerPass(); +extern const PassInfo *const InstructionNamerID; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/AddrModeMatcher.h b/include/llvm/Transforms/Utils/AddrModeMatcher.h new file mode 100644 index 0000000..913a541 --- /dev/null +++ b/include/llvm/Transforms/Utils/AddrModeMatcher.h @@ -0,0 +1,102 @@ +//===- AddrModeMatcher.h - Addressing mode matching facility ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// AddressingModeMatcher - This class exposes a single public method, which is +// used to construct a "maximal munch" of the addressing mode for the target +// specified by TLI for an access to "V" with an access type of AccessTy. This +// returns the addressing mode that is actually matched by value, but also +// returns the list of instructions involved in that addressing computation in +// AddrModeInsts. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H +#define LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Streams.h" +#include "llvm/Target/TargetLowering.h" + +namespace llvm { + +class GlobalValue; +class Instruction; +class Value; +class Type; +class User; + +/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode +/// which holds actual Value*'s for register values. 
+struct ExtAddrMode : public TargetLowering::AddrMode { + Value *BaseReg; + Value *ScaledReg; + ExtAddrMode() : BaseReg(0), ScaledReg(0) {} + void print(OStream &OS) const; + void dump() const; +}; + +static inline OStream &operator<<(OStream &OS, const ExtAddrMode &AM) { + AM.print(OS); + return OS; +} + +class AddressingModeMatcher { + SmallVectorImpl<Instruction*> &AddrModeInsts; + const TargetLowering &TLI; + + /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and + /// the memory instruction that we're computing this address for. + const Type *AccessTy; + Instruction *MemoryInst; + + /// AddrMode - This is the addressing mode that we're building up. This is + /// part of the return value of this addressing mode matching stuff. + ExtAddrMode &AddrMode; + + /// IgnoreProfitability - This is set to true when we should not do + /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode + /// always returns true. + bool IgnoreProfitability; + + AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI, + const TargetLowering &T, const Type *AT, + Instruction *MI, ExtAddrMode &AM) + : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) { + IgnoreProfitability = false; + } +public: + + /// Match - Find the maximal addressing mode that a load/store of V can fold, + /// give an access type of AccessTy. This returns a list of involved + /// instructions in AddrModeInsts. + static ExtAddrMode Match(Value *V, const Type *AccessTy, + Instruction *MemoryInst, + SmallVectorImpl<Instruction*> &AddrModeInsts, + const TargetLowering &TLI) { + ExtAddrMode Result; + + bool Success = + AddressingModeMatcher(AddrModeInsts, TLI, AccessTy, + MemoryInst, Result).MatchAddr(V, 0); + Success = Success; assert(Success && "Couldn't select *anything*?"); + return Result; + } +private: + bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); + bool MatchAddr(Value *V, unsigned Depth); + bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth); + bool IsProfitableToFoldIntoAddressingMode(Instruction *I, + ExtAddrMode &AMBefore, + ExtAddrMode &AMAfter); + bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h new file mode 100644 index 0000000..95ffa46 --- /dev/null +++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h @@ -0,0 +1,191 @@ +//===-- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This family of functions perform manipulations on basic blocks, and +// instructions contained within basic blocks. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCK_H +#define LLVM_TRANSFORMS_UTILS_BASICBLOCK_H + +// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock + +#include "llvm/BasicBlock.h" +#include "llvm/Support/CFG.h" + +namespace llvm { + +class Instruction; +class Pass; +class AliasAnalysis; + +/// DeleteDeadBlock - Delete the specified block, which must have no +/// predecessors. 
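Below is a hedged sketch of how the matcher above might be driven from something like a code-generation preparation pass; the LoadInst and TargetLowering objects are assumed to be supplied by the caller, and llvm/Instructions.h is assumed to declare LoadInst.

#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Instructions.h"          // assumed location of LoadInst
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

// Compute the maximal addressing mode that can fold the address of LI.
ExtAddrMode matchLoadAddress(LoadInst *LI, const TargetLowering &TLI) {
  SmallVector<Instruction*, 8> AddrModeInsts;  // receives the address computation
  ExtAddrMode AM = AddressingModeMatcher::Match(LI->getPointerOperand(),
                                                LI->getType(), LI,
                                                AddrModeInsts, TLI);
  // AM.BaseReg / AM.ScaledReg now describe the matched mode.
  return AM;
}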
+void DeleteDeadBlock(BasicBlock *BB); + + +/// FoldSingleEntryPHINodes - We know that BB has one predecessor. If there are +/// any single-entry PHI nodes in it, fold them away. This handles the case +/// when all entries to the PHI nodes in a block are guaranteed equal, such as +/// when the block has exactly one predecessor. +void FoldSingleEntryPHINodes(BasicBlock *BB); + +/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it +/// is dead. Also recursively delete any operands that become dead as +/// a result. This includes tracing the def-use list from the PHI to see if +/// it is ultimately unused or if it reaches an unused cycle. +void DeleteDeadPHIs(BasicBlock *BB); + +/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor, +/// if possible. The return value indicates success or failure. +bool MergeBlockIntoPredecessor(BasicBlock* BB, Pass* P = 0); + +// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI) +// with a value, then remove and delete the original instruction. +// +void ReplaceInstWithValue(BasicBlock::InstListType &BIL, + BasicBlock::iterator &BI, Value *V); + +// ReplaceInstWithInst - Replace the instruction specified by BI with the +// instruction specified by I. The original instruction is deleted and BI is +// updated to point to the new instruction. +// +void ReplaceInstWithInst(BasicBlock::InstListType &BIL, + BasicBlock::iterator &BI, Instruction *I); + +// ReplaceInstWithInst - Replace the instruction specified by From with the +// instruction specified by To. +// +void ReplaceInstWithInst(Instruction *From, Instruction *To); + +/// CopyPrecedingStopPoint - If I is immediately preceded by a StopPoint, +/// make a copy of the stoppoint before InsertPos (presumably before copying +/// or moving I). +void CopyPrecedingStopPoint(Instruction *I, BasicBlock::iterator InsertPos); + +/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the +/// instruction before ScanFrom) checking to see if we have the value at the +/// memory address *Ptr locally available within a small number of instructions. +/// If the value is available, return it. +/// +/// If not, return the iterator for the last validated instruction that the +/// value would be live through. If we scanned the entire block and didn't find +/// something that invalidates *Ptr or provides it, ScanFrom would be left at +/// begin() and this returns null. ScanFrom could also be left +/// +/// MaxInstsToScan specifies the maximum instructions to scan in the block. If +/// it is set to 0, it will scan the whole block. You can also optionally +/// specify an alias analysis implementation, which makes this more precise. +Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB, + BasicBlock::iterator &ScanFrom, + unsigned MaxInstsToScan = 6, + AliasAnalysis *AA = 0); + +/// FindFunctionBackedges - Analyze the specified function to find all of the +/// loop backedges in the function and return them. This is a relatively cheap +/// (compared to computing dominators and loop info) analysis. +/// +/// The output is added to Result, as pairs of <from,to> edge info. +void FindFunctionBackedges(const Function &F, + SmallVectorImpl<std::pair<const BasicBlock*,const BasicBlock*> > &Result); + + +// RemoveSuccessor - Change the specified terminator instruction such that its +// successor #SuccNum no longer exists. 
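To illustrate FindAvailableLoadedValue declared above, here is a small sketch of the redundant-load cleanup it enables; the LoadInst type and the surrounding pass context are assumptions, not part of this header.

#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Instructions.h"          // assumed location of LoadInst

using namespace llvm;

// If the value loaded by LI is already available nearby, forward it.
bool tryToReuseLoadedValue(LoadInst *LI) {
  BasicBlock::iterator ScanFrom(LI);    // the scan starts just before LI
  if (Value *Avail = FindAvailableLoadedValue(LI->getPointerOperand(),
                                              LI->getParent(), ScanFrom)) {
    LI->replaceAllUsesWith(Avail);      // reuse the earlier value
    LI->eraseFromParent();              // the load is now dead
    return true;
  }
  return false;
}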
Because this reduces the outgoing +// degree of the current basic block, the actual terminator instruction itself +// may have to be changed. In the case where the last successor of the block is +// deleted, a return instruction is inserted in its place which can cause a +// suprising change in program behavior if it is not expected. +// +void RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum); + +/// isCriticalEdge - Return true if the specified edge is a critical edge. +/// Critical edges are edges from a block with multiple successors to a block +/// with multiple predecessors. +/// +bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum, + bool AllowIdenticalEdges = false); + +/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to +/// split the critical edge. This will update DominatorTree and +/// DominatorFrontier information if it is available, thus calling this pass +/// will not invalidate either of them. This returns true if the edge was split, +/// false otherwise. +/// +/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the +/// specified successor will be merged into the same critical edge block. +/// This is most commonly interesting with switch instructions, which may +/// have many edges to any one destination. This ensures that all edges to that +/// dest go to one block instead of each going to a different block, but isn't +/// the standard definition of a "critical edge". +/// +bool SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, Pass *P = 0, + bool MergeIdenticalEdges = false); + +inline bool SplitCriticalEdge(BasicBlock *BB, succ_iterator SI, Pass *P = 0) { + return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(), P); +} + +/// SplitCriticalEdge - If the edge from *PI to BB is not critical, return +/// false. Otherwise, split all edges between the two blocks and return true. +/// This updates all of the same analyses as the other SplitCriticalEdge +/// function. If P is specified, it updates the analyses +/// described above. +inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI, Pass *P = 0) { + bool MadeChange = false; + TerminatorInst *TI = (*PI)->getTerminator(); + for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) + if (TI->getSuccessor(i) == Succ) + MadeChange |= SplitCriticalEdge(TI, i, P); + return MadeChange; +} + +/// SplitCriticalEdge - If an edge from Src to Dst is critical, split the edge +/// and return true, otherwise return false. This method requires that there be +/// an edge between the two blocks. If P is specified, it updates the analyses +/// described above. +inline bool SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst, Pass *P = 0, + bool MergeIdenticalEdges = false) { + TerminatorInst *TI = Src->getTerminator(); + unsigned i = 0; + while (1) { + assert(i != TI->getNumSuccessors() && "Edge doesn't exist!"); + if (TI->getSuccessor(i) == Dst) + return SplitCriticalEdge(TI, i, P, MergeIdenticalEdges); + ++i; + } +} + +/// SplitEdge - Split the edge connecting specified block. Pass P must +/// not be NULL. +BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To, Pass *P); + +/// SplitBlock - Split the specified block at the specified instruction - every +/// thing before SplitPt stays in Old and everything starting with SplitPt moves +/// to a new block. The two blocks are joined by an unconditional branch and +/// the loop info is updated. 
+/// +BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P); + +/// SplitBlockPredecessors - This method transforms BB by introducing a new +/// basic block into the function, and moving some of the predecessors of BB to +/// be predecessors of the new block. The new predecessors are indicated by the +/// Preds array, which has NumPreds elements in it. The new block is given a +/// suffix of 'Suffix'. This function returns the new block. +/// +/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree and +/// DominanceFrontier, but no other analyses. +BasicBlock *SplitBlockPredecessors(BasicBlock *BB, BasicBlock *const *Preds, + unsigned NumPreds, const char *Suffix, + Pass *P = 0); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/BasicInliner.h b/include/llvm/Transforms/Utils/BasicInliner.h new file mode 100644 index 0000000..6a57055 --- /dev/null +++ b/include/llvm/Transforms/Utils/BasicInliner.h @@ -0,0 +1,55 @@ +//===- BasicInliner.h - Basic function level inliner ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a simple function based inliner that does not use +// call graph information. +// +//===----------------------------------------------------------------------===// + +#ifndef BASICINLINER_H +#define BASICINLINER_H + +#include "llvm/Transforms/Utils/InlineCost.h" + +namespace llvm { + + class Function; + class TargetData; + struct BasicInlinerImpl; + + /// BasicInliner - BasicInliner provides function level inlining interface. + /// Clients provide list of functions which are inline without using + /// module level call graph information. Note that the BasicInliner is + /// free to delete a function if it is inlined into all call sites. + class BasicInliner { + public: + + explicit BasicInliner(TargetData *T = NULL); + ~BasicInliner(); + + /// addFunction - Add function into the list of functions to process. + /// All functions must be inserted using this interface before invoking + /// inlineFunctions(). + void addFunction(Function *F); + + /// neverInlineFunction - Sometimes a function is never to be inlined + /// because of one or other reason. + void neverInlineFunction(Function *F); + + /// inlineFuctions - Walk all call sites in all functions supplied by + /// client. Inline as many call sites as possible. Delete completely + /// inlined functions. + void inlineFunctions(); + + private: + BasicInlinerImpl *Impl; + }; +} + +#endif diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h new file mode 100644 index 0000000..1e2bbaa --- /dev/null +++ b/include/llvm/Transforms/Utils/Cloning.h @@ -0,0 +1,191 @@ +//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various functions that are used to clone chunks of LLVM +// code for various purposes. 
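Returning to BasicInliner above, here is a sketch of how a client might drive it; Module iteration and the TargetData object are assumed from headers outside this section, and the choice of which function to exclude is purely illustrative.

#include "llvm/Transforms/Utils/BasicInliner.h"
#include "llvm/Module.h"                // assumed location of Module/Function

using namespace llvm;

// Inline calls among the defined functions of M, leaving Excluded alone.
void inlineModuleFunctions(Module &M, Function *Excluded, TargetData *TD) {
  BasicInliner Inliner(TD);
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
    if (!F->isDeclaration())
      Inliner.addFunction(&*F);         // candidate for inlining
  if (Excluded)
    Inliner.neverInlineFunction(Excluded);
  Inliner.inlineFunctions();            // may delete fully-inlined functions
}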
This varies from copying whole modules into new +// modules, to cloning functions with different arguments, to inlining +// functions, to copying basic blocks to support loop unrolling or superblock +// formation, etc. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H +#define LLVM_TRANSFORMS_UTILS_CLONING_H + +#include <vector> +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + +class Module; +class Function; +class Pass; +class LPPassManager; +class BasicBlock; +class Value; +class CallInst; +class InvokeInst; +class ReturnInst; +class CallSite; +class Trace; +class CallGraph; +class TargetData; +class LoopInfo; +template<class N> class LoopBase; +typedef LoopBase<BasicBlock> Loop; + +/// CloneModule - Return an exact copy of the specified module +/// +Module *CloneModule(const Module *M); +Module *CloneModule(const Module *M, DenseMap<const Value*, Value*> &ValueMap); + +/// ClonedCodeInfo - This struct can be used to capture information about code +/// being cloned, while it is being cloned. +struct ClonedCodeInfo { + /// ContainsCalls - This is set to true if the cloned code contains a normal + /// call instruction. + bool ContainsCalls; + + /// ContainsUnwinds - This is set to true if the cloned code contains an + /// unwind instruction. + bool ContainsUnwinds; + + /// ContainsDynamicAllocas - This is set to true if the cloned code contains + /// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in + /// the entry block or they are in the entry block but are not a constant + /// size. + bool ContainsDynamicAllocas; + + ClonedCodeInfo() { + ContainsCalls = false; + ContainsUnwinds = false; + ContainsDynamicAllocas = false; + } +}; + + +/// CloneBasicBlock - Return a copy of the specified basic block, but without +/// embedding the block into a particular function. The block returned is an +/// exact copy of the specified basic block, without any remapping having been +/// performed. Because of this, this is only suitable for applications where +/// the basic block will be inserted into the same function that it was cloned +/// from (loop unrolling would use this, for example). +/// +/// Also, note that this function makes a direct copy of the basic block, and +/// can thus produce illegal LLVM code. In particular, it will copy any PHI +/// nodes from the original block, even though there are no predecessors for the +/// newly cloned block (thus, phi nodes will have to be updated). Also, this +/// block will branch to the old successors of the original block: these +/// successors will have to have any PHI nodes updated to account for the new +/// incoming edges. +/// +/// The correlation between instructions in the source and result basic blocks +/// is recorded in the ValueMap map. +/// +/// If you have a particular suffix you'd like to use to add to any cloned +/// names, specify it as the optional third parameter. +/// +/// If you would like the basic block to be auto-inserted into the end of a +/// function, you can specify it as the optional fourth parameter. +/// +/// If you would like to collect additional information about the cloned +/// function, you can specify a ClonedCodeInfo object with the optional fifth +/// parameter. +/// +BasicBlock *CloneBasicBlock(const BasicBlock *BB, + DenseMap<const Value*, Value*> &ValueMap, + const char *NameSuffix = "", Function *F = 0, + ClonedCodeInfo *CodeInfo = 0); + + +/// CloneLoop - Clone Loop. Clone dominator info for loop insiders. 
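A minimal sketch of the CloneBasicBlock usage described above, cloning a block back into its own function as an unrolling-style transform would. The operand remapping and PHI fix-ups that a real client must perform afterwards are only noted in comments, and the helper name is invented.

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/BasicBlock.h"            // assumed header location
#include "llvm/ADT/DenseMap.h"

using namespace llvm;

// Make a copy of BB at the end of its own function.
BasicBlock *cloneBlockIntoSameFunction(BasicBlock *BB) {
  DenseMap<const Value*, Value*> ValueMap;   // original -> clone correlation
  BasicBlock *NewBB = CloneBasicBlock(BB, ValueMap, ".copy", BB->getParent());
  // NOTE: operands of the cloned instructions still refer to the original
  // values; the caller must remap them via ValueMap and fix up PHI nodes.
  return NewBB;
}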
Populate ValueMap +/// using old blocks to new blocks mapping. +Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI, + DenseMap<const Value *, Value *> &ValueMap, Pass *P); + +/// CloneFunction - Return a copy of the specified function, but without +/// embedding the function into another module. Also, any references specified +/// in the ValueMap are changed to refer to their mapped value instead of the +/// original one. If any of the arguments to the function are in the ValueMap, +/// the arguments are deleted from the resultant function. The ValueMap is +/// updated to include mappings from all of the instructions and basicblocks in +/// the function from their old to new values. The final argument captures +/// information about the cloned code if non-null. +/// +Function *CloneFunction(const Function *F, + DenseMap<const Value*, Value*> &ValueMap, + ClonedCodeInfo *CodeInfo = 0); + +/// CloneFunction - Version of the function that doesn't need the ValueMap. +/// +inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){ + DenseMap<const Value*, Value*> ValueMap; + return CloneFunction(F, ValueMap, CodeInfo); +} + +/// Clone OldFunc into NewFunc, transforming the old arguments into references +/// to ArgMap values. Note that if NewFunc already has basic blocks, the ones +/// cloned into it will be added to the end of the function. This function +/// fills in a list of return instructions, and can optionally append the +/// specified suffix to all values cloned. +/// +void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, + DenseMap<const Value*, Value*> &ValueMap, + std::vector<ReturnInst*> &Returns, + const char *NameSuffix = "", + ClonedCodeInfo *CodeInfo = 0); + +/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto, +/// except that it does some simple constant prop and DCE on the fly. The +/// effect of this is to copy significantly less code in cases where (for +/// example) a function call with constant arguments is inlined, and those +/// constant arguments cause a significant amount of code in the callee to be +/// dead. Since this doesn't produce an exactly copy of the input, it can't be +/// used for things like CloneFunction or CloneModule. +void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, + DenseMap<const Value*, Value*> &ValueMap, + std::vector<ReturnInst*> &Returns, + const char *NameSuffix = "", + ClonedCodeInfo *CodeInfo = 0, + const TargetData *TD = 0); + + +/// CloneTraceInto - Clone T into NewFunc. Original<->clone mapping is +/// saved in ValueMap. +/// +void CloneTraceInto(Function *NewFunc, Trace &T, + DenseMap<const Value*, Value*> &ValueMap, + const char *NameSuffix); + +/// CloneTrace - Returns a copy of the specified trace. +/// It takes a vector of basic blocks clones the basic blocks, removes internal +/// phi nodes, adds it to the same function as the original (although there is +/// no jump to it) and returns the new vector of basic blocks. +std::vector<BasicBlock *> CloneTrace(const std::vector<BasicBlock*> &origTrace); + +/// InlineFunction - This function inlines the called function into the basic +/// block of the caller. This returns false if it is not possible to inline +/// this call. The program is still in a well defined state if this occurs +/// though. +/// +/// Note that this only does one level of inlining. For example, if the +/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now +/// exists in the instruction stream. 
Similiarly this will inline a recursive +/// function by one level. +/// +/// If a non-null callgraph pointer is provided, these functions update the +/// CallGraph to represent the program after inlining. +/// +bool InlineFunction(CallInst *C, CallGraph *CG = 0, const TargetData *TD = 0); +bool InlineFunction(InvokeInst *II, CallGraph *CG = 0, const TargetData *TD =0); +bool InlineFunction(CallSite CS, CallGraph *CG = 0, const TargetData *TD = 0); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/FunctionUtils.h b/include/llvm/Transforms/Utils/FunctionUtils.h new file mode 100644 index 0000000..dc7ef23 --- /dev/null +++ b/include/llvm/Transforms/Utils/FunctionUtils.h @@ -0,0 +1,41 @@ +//===-- Transform/Utils/FunctionUtils.h - Function Utils --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This family of transformations manipulate LLVM functions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_FUNCTION_H +#define LLVM_TRANSFORMS_UTILS_FUNCTION_H + +#include "llvm/Analysis/LoopInfo.h" +#include <vector> + +namespace llvm { + class BasicBlock; + class DominatorTree; + class Function; + + /// ExtractCodeRegion - rip out a sequence of basic blocks into a new function + /// + Function* ExtractCodeRegion(DominatorTree& DT, + const std::vector<BasicBlock*> &code, + bool AggregateArgs = false); + + /// ExtractLoop - rip out a natural loop into a new function + /// + Function* ExtractLoop(DominatorTree& DT, Loop *L, + bool AggregateArgs = false); + + /// ExtractBasicBlock - rip out a basic block into a new function + /// + Function* ExtractBasicBlock(BasicBlock *BB, bool AggregateArgs = false); +} + +#endif diff --git a/include/llvm/Transforms/Utils/InlineCost.h b/include/llvm/Transforms/Utils/InlineCost.h new file mode 100644 index 0000000..f275b76 --- /dev/null +++ b/include/llvm/Transforms/Utils/InlineCost.h @@ -0,0 +1,155 @@ +//===- InlineCost.cpp - Cost analysis for inliner ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements heuristics for inlining decisions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_INLINECOST_H +#define LLVM_TRANSFORMS_UTILS_INLINECOST_H + +#include "llvm/ADT/SmallPtrSet.h" +#include <cassert> +#include <climits> +#include <map> +#include <vector> + +namespace llvm { + + class Value; + class Function; + class CallSite; + + /// InlineCost - Represent the cost of inlining a function. This + /// supports special values for functions which should "always" or + /// "never" be inlined. Otherwise, the cost represents a unitless + /// amount; smaller values increase the likelyhood of the function + /// being inlined. + class InlineCost { + enum Kind { + Value, + Always, + Never + }; + + // This is a do-it-yourself implementation of + // int Cost : 30; + // unsigned Type : 2; + // We used to use bitfields, but they were sometimes miscompiled (PR3822). 
+ enum { TYPE_BITS = 2 }; + enum { COST_BITS = unsigned(sizeof(unsigned)) * CHAR_BIT - TYPE_BITS }; + unsigned TypedCost; // int Cost : COST_BITS; unsigned Type : TYPE_BITS; + + Kind getType() const { + return Kind(TypedCost >> COST_BITS); + } + + int getCost() const { + // Sign-extend the bottom COST_BITS bits. + return (int(TypedCost << TYPE_BITS)) >> TYPE_BITS; + } + + InlineCost(int C, int T) { + TypedCost = (unsigned(C << TYPE_BITS) >> TYPE_BITS) | (T << COST_BITS); + assert(getCost() == C && "Cost exceeds InlineCost precision"); + } + public: + static InlineCost get(int Cost) { return InlineCost(Cost, Value); } + static InlineCost getAlways() { return InlineCost(0, Always); } + static InlineCost getNever() { return InlineCost(0, Never); } + + bool isVariable() const { return getType() == Value; } + bool isAlways() const { return getType() == Always; } + bool isNever() const { return getType() == Never; } + + /// getValue() - Return a "variable" inline cost's amount. It is + /// an error to call this on an "always" or "never" InlineCost. + int getValue() const { + assert(getType() == Value && "Invalid access of InlineCost"); + return getCost(); + } + }; + + /// InlineCostAnalyzer - Cost analyzer used by inliner. + class InlineCostAnalyzer { + struct ArgInfo { + public: + unsigned ConstantWeight; + unsigned AllocaWeight; + + ArgInfo(unsigned CWeight, unsigned AWeight) + : ConstantWeight(CWeight), AllocaWeight(AWeight) {} + }; + + // FunctionInfo - For each function, calculate the size of it in blocks and + // instructions. + struct FunctionInfo { + /// NeverInline - True if this callee should never be inlined into a + /// caller. + bool NeverInline; + + /// usesDynamicAlloca - True if this function calls alloca (in the C sense). + bool usesDynamicAlloca; + + /// NumInsts, NumBlocks - Keep track of how large each function is, which + /// is used to estimate the code size cost of inlining it. + unsigned NumInsts, NumBlocks; + + /// NumVectorInsts - Keep track of how many instructions produce vector + /// values. The inliner is being more aggressive with inlining vector + /// kernels. + unsigned NumVectorInsts; + + /// ArgumentWeights - Each formal argument of the function is inspected to + /// see if it is used in any contexts where making it a constant or alloca + /// would reduce the code size. If so, we add some value to the argument + /// entry here. + std::vector<ArgInfo> ArgumentWeights; + + FunctionInfo() : NeverInline(false), usesDynamicAlloca(false), NumInsts(0), + NumBlocks(0), NumVectorInsts(0) {} + + /// analyzeFunction - Fill in the current structure with information + /// gleaned from the specified function. + void analyzeFunction(Function *F); + + /// CountCodeReductionForConstant - Figure out an approximation for how + /// many instructions will be constant folded if the specified value is + /// constant. + unsigned CountCodeReductionForConstant(Value *V); + + /// CountCodeReductionForAlloca - Figure out an approximation of how much + /// smaller the function will be if it is inlined into a context where an + /// argument becomes an alloca. + /// + unsigned CountCodeReductionForAlloca(Value *V); + }; + + std::map<const Function *, FunctionInfo> CachedFunctionInfo; + + public: + + /// getInlineCost - The heuristic used to determine if we should inline the + /// function call or not. 
+ /// + InlineCost getInlineCost(CallSite CS, + SmallPtrSet<const Function *, 16> &NeverInline); + + /// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a + /// higher threshold to determine if the function call should be inlined. + float getInlineFudgeFactor(CallSite CS); + + /// resetCachedFunctionInfo - erase any cached cost info for this function. + void resetCachedCostInfo(Function* Caller) { + CachedFunctionInfo[Caller].NumBlocks = 0; + } + }; +} + +#endif diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h new file mode 100644 index 0000000..5ea1a50 --- /dev/null +++ b/include/llvm/Transforms/Utils/Local.h @@ -0,0 +1,116 @@ +//===-- Local.h - Functions to perform local transformations ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This family of functions perform various local transformations to the +// program. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H +#define LLVM_TRANSFORMS_UTILS_LOCAL_H + +namespace llvm { + +class User; +class BasicBlock; +class Instruction; +class Value; +class Pass; +class PHINode; +class AllocaInst; +class ConstantExpr; +class TargetData; +struct DbgInfoIntrinsic; + +template<typename T> class SmallVectorImpl; + +//===----------------------------------------------------------------------===// +// Local constant propagation. +// + +/// ConstantFoldTerminator - If a terminator instruction is predicated on a +/// constant value, convert it into an unconditional branch to the constant +/// destination. This is a nontrivial operation because the successors of this +/// basic block must have their PHI nodes updated. +/// +bool ConstantFoldTerminator(BasicBlock *BB); + +//===----------------------------------------------------------------------===// +// Local dead code elimination. +// + +/// isInstructionTriviallyDead - Return true if the result produced by the +/// instruction is not used, and the instruction has no side effects. +/// +bool isInstructionTriviallyDead(Instruction *I); + +/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a +/// trivially dead instruction, delete it. If that makes any of its operands +/// trivially dead, delete them too, recursively. +void RecursivelyDeleteTriviallyDeadInstructions(Value *V); + +/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively +/// dead PHI node, due to being a def-use chain of single-use nodes that +/// either forms a cycle or is terminated by a trivially dead instruction, +/// delete it. If that makes any of its operands trivially dead, delete them +/// too, recursively. +void RecursivelyDeleteDeadPHINode(PHINode *PN); + +//===----------------------------------------------------------------------===// +// Control Flow Graph Restructuring. +// + +/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its +/// predecessor is known to have one successor (BB!). Eliminate the edge +/// between them, moving the instructions in the predecessor into BB. This +/// deletes the predecessor block. +/// +void MergeBasicBlockIntoOnlyPred(BasicBlock *BB); + + +/// SimplifyCFG - This function is used to do simplification of a CFG. 
For +/// example, it adjusts branches to branches to eliminate the extra hop, it +/// eliminates unreachable basic blocks, and does other "peephole" optimization +/// of the CFG. It returns true if a modification was made, possibly deleting +/// the basic block that was pointed to. +/// +/// WARNING: The entry node of a method may not be simplified. +/// +bool SimplifyCFG(BasicBlock *BB); + +/// DemoteRegToStack - This function takes a virtual register computed by an +/// Instruction and replaces it with a slot in the stack frame, allocated via +/// alloca. This allows the CFG to be changed around without fear of +/// invalidating the SSA information for the value. It returns the pointer to +/// the alloca inserted to create a stack slot for X. +/// +AllocaInst *DemoteRegToStack(Instruction &X, bool VolatileLoads = false, + Instruction *AllocaPoint = 0); + +/// DemotePHIToStack - This function takes a virtual register computed by a phi +/// node and replaces it with a slot in the stack frame, allocated via alloca. +/// The phi node is deleted and it returns the pointer to the alloca inserted. +AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0); + +/// OnlyUsedByDbgIntrinsics - Return true if the instruction I is only used +/// by DbgIntrinsics. If DbgInUses is specified then the vector is filled +/// with DbgInfoIntrinsic that use the instruction I. +bool OnlyUsedByDbgInfoIntrinsics(Instruction *I, + SmallVectorImpl<DbgInfoIntrinsic *> *DbgInUses = 0); + +/// UserIsDebugInfo - Return true if U is a constant expr used by +/// llvm.dbg.variable or llvm.dbg.global_variable +bool UserIsDebugInfo(User *U); + +/// RemoveDbgInfoUser - Remove an User which is representing debug info. +void RemoveDbgInfoUser(User *U); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h new file mode 100644 index 0000000..35cfadd --- /dev/null +++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h @@ -0,0 +1,46 @@ +//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes an interface to promote alloca instructions to SSA +// registers, by using the SSA construction algorithm. +// +//===----------------------------------------------------------------------===// + +#ifndef TRANSFORMS_UTILS_PROMOTEMEMTOREG_H +#define TRANSFORMS_UTILS_PROMOTEMEMTOREG_H + +#include <vector> + +namespace llvm { + +class AllocaInst; +class DominatorTree; +class DominanceFrontier; +class AliasSetTracker; + +/// isAllocaPromotable - Return true if this alloca is legal for promotion. +/// This is true if there are only loads and stores to the alloca... +/// +bool isAllocaPromotable(const AllocaInst *AI); + +/// PromoteMemToReg - Promote the specified list of alloca instructions into +/// scalar registers, inserting PHI nodes as appropriate. This function makes +/// use of DominanceFrontier information. This function does not modify the CFG +/// of the function at all. All allocas must be from the same function. +/// +/// If AST is specified, the specified tracker is updated to reflect changes +/// made to the IR. 
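/// (An illustrative driver, not part of this header, assuming a Function F
/// whose DominatorTree DT and DominanceFrontier DF are already available:
///     std::vector<AllocaInst*> Allocas;
///     BasicBlock &BB = F.getEntryBlock();
///     for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
///       if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
///         if (isAllocaPromotable(AI))
///           Allocas.push_back(AI);
///     if (!Allocas.empty())
///       PromoteMemToReg(Allocas, DT, DF);
/// which is roughly the pattern the mem2reg pass itself follows.)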
+/// +void PromoteMemToReg(const std::vector<AllocaInst*> &Allocas, + DominatorTree &DT, DominanceFrontier &DF, + AliasSetTracker *AST = 0); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h new file mode 100644 index 0000000..c2d0993 --- /dev/null +++ b/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h @@ -0,0 +1,49 @@ +//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass is used to ensure that functions have at most one return and one +// unwind instruction in them. Additionally, it keeps track of which node is +// the new exit node of the CFG. If there are no return or unwind instructions +// in the function, the getReturnBlock/getUnwindBlock methods will return a null +// pointer. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UNIFYFUNCTIONEXITNODES_H +#define LLVM_TRANSFORMS_UNIFYFUNCTIONEXITNODES_H + +#include "llvm/Pass.h" + +namespace llvm { + +struct UnifyFunctionExitNodes : public FunctionPass { + BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock; +public: + static char ID; // Pass identification, replacement for typeid + UnifyFunctionExitNodes() : FunctionPass(&ID), + ReturnBlock(0), UnwindBlock(0) {} + + // We can preserve non-critical-edgeness when we unify function exit nodes + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + + // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistant) + // return, unwind, or unreachable basic blocks in the CFG. + // + BasicBlock *getReturnBlock() const { return ReturnBlock; } + BasicBlock *getUnwindBlock() const { return UnwindBlock; } + BasicBlock *getUnreachableBlock() const { return UnreachableBlock; } + + virtual bool runOnFunction(Function &F); +}; + +Pass *createUnifyFunctionExitNodesPass(); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/UnrollLoop.h b/include/llvm/Transforms/Utils/UnrollLoop.h new file mode 100644 index 0000000..a9c0bf6 --- /dev/null +++ b/include/llvm/Transforms/Utils/UnrollLoop.h @@ -0,0 +1,29 @@ +//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines some loop unrolling utilities. It does not define any +// actual pass or policy, but provides a single function to perform loop +// unrolling. 
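// A minimal illustration (not part of this header) of driving that function,
// assuming a Loop *L plus the LoopInfo and LPPassManager that own it:
//
//   if (UnrollLoop(L, /*Count=*/4, LI, LPM))
//     ... // the loop body was duplicated; L may even have been deleted
//         // if it could be unrolled completely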
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H +#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H + +#include "llvm/Analysis/LoopInfo.h" + +namespace llvm { + +class LPPassManager; + +bool UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM); + +} + +#endif diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h new file mode 100644 index 0000000..ed33413 --- /dev/null +++ b/include/llvm/Transforms/Utils/ValueMapper.h @@ -0,0 +1,29 @@ +//===- ValueMapper.h - Interface shared by lib/Transforms/Utils -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the MapValue interface which is used by various parts of +// the Transforms/Utils library to implement cloning and linking facilities. +// +//===----------------------------------------------------------------------===// + +#ifndef VALUEMAPPER_H +#define VALUEMAPPER_H + +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + class Value; + class Instruction; + typedef DenseMap<const Value *, Value *> ValueMapTy; + + Value *MapValue(const Value *V, ValueMapTy &VM); + void RemapInstruction(Instruction *I, ValueMapTy &VM); +} // End llvm namespace + +#endif diff --git a/include/llvm/Type.h b/include/llvm/Type.h new file mode 100644 index 0000000..5ce23ef --- /dev/null +++ b/include/llvm/Type.h @@ -0,0 +1,467 @@ +//===-- llvm/Type.h - Classes for handling data types -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + +#ifndef LLVM_TYPE_H +#define LLVM_TYPE_H + +#include "llvm/AbstractTypeUser.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/GraphTraits.h" +#include "llvm/ADT/iterator.h" +#include <string> +#include <vector> + +namespace llvm { + +class DerivedType; +class PointerType; +class IntegerType; +class TypeMapBase; +class raw_ostream; +class Module; + +/// This file contains the declaration of the Type class. For more "Type" type +/// stuff, look in DerivedTypes.h. +/// +/// The instances of the Type class are immutable: once they are created, +/// they are never changed. Also note that only one instance of a particular +/// type is ever created. Thus seeing if two types are equal is a matter of +/// doing a trivial pointer comparison. To enforce that no two equal instances +/// are created, Type instances can only be created via static factory methods +/// in class Type and in derived classes. +/// +/// Once allocated, Types are never free'd, unless they are an abstract type +/// that is resolved to a more concrete type. +/// +/// Types themself don't have a name, and can be named either by: +/// - using SymbolTable instance, typically from some Module, +/// - using convenience methods in the Module class (which uses module's +/// SymbolTable too). +/// +/// Opaque types are simple derived types with no state. There may be many +/// different Opaque type objects floating around, but two are only considered +/// identical if they are pointer equals of each other. 
This allows us to have +/// two opaque types that end up resolving to different concrete types later. +/// +/// Opaque types are also kinda weird and scary and different because they have +/// to keep a list of uses of the type. When, through linking, parsing, or +/// bitcode reading, they become resolved, they need to find and update all +/// users of the unknown type, causing them to reference a new, more concrete +/// type. Opaque types are deleted when their use list dwindles to zero users. +/// +/// @brief Root of type hierarchy +class Type : public AbstractTypeUser { +public: + //===-------------------------------------------------------------------===// + /// Definitions of all of the base types for the Type system. Based on this + /// value, you can cast to a "DerivedType" subclass (see DerivedTypes.h) + /// Note: If you add an element to this, you need to add an element to the + /// Type::getPrimitiveType function, or else things will break! + /// + enum TypeID { + // PrimitiveTypes .. make sure LastPrimitiveTyID stays up to date + VoidTyID = 0, ///< 0: type with no size + FloatTyID, ///< 1: 32 bit floating point type + DoubleTyID, ///< 2: 64 bit floating point type + X86_FP80TyID, ///< 3: 80 bit floating point type (X87) + FP128TyID, ///< 4: 128 bit floating point type (112-bit mantissa) + PPC_FP128TyID, ///< 5: 128 bit floating point type (two 64-bits) + LabelTyID, ///< 6: Labels + MetadataTyID, ///< 7: Metadata + + // Derived types... see DerivedTypes.h file... + // Make sure FirstDerivedTyID stays up to date!!! + IntegerTyID, ///< 8: Arbitrary bit width integers + FunctionTyID, ///< 9: Functions + StructTyID, ///< 10: Structures + ArrayTyID, ///< 11: Arrays + PointerTyID, ///< 12: Pointers + OpaqueTyID, ///< 13: Opaque: type with unknown structure + VectorTyID, ///< 14: SIMD 'packed' format, or other vector type + + NumTypeIDs, // Must remain as last defined ID + LastPrimitiveTyID = LabelTyID, + FirstDerivedTyID = IntegerTyID + }; + +private: + TypeID ID : 8; // The current base type of this type. + bool Abstract : 1; // True if type contains an OpaqueType + unsigned SubclassData : 23; //Space for subclasses to store data + + /// RefCount - This counts the number of PATypeHolders that are pointing to + /// this type. When this number falls to zero, if the type is abstract and + /// has no AbstractTypeUsers, the type is deleted. This is only sensical for + /// derived types. + /// + mutable unsigned RefCount; + + const Type *getForwardedTypeInternal() const; + + // Some Type instances are allocated as arrays, some aren't. So we provide + // this method to get the right kind of destruction for the type of Type. + void destroy() const; // const is a lie, this does "delete this"! + +protected: + explicit Type(TypeID id) : ID(id), Abstract(false), SubclassData(0), + RefCount(0), ForwardType(0), NumContainedTys(0), + ContainedTys(0) {} + virtual ~Type() { + assert(AbstractTypeUsers.empty() && "Abstract types remain"); + } + + /// Types can become nonabstract later, if they are refined. + /// + inline void setAbstract(bool Val) { Abstract = Val; } + + unsigned getRefCount() const { return RefCount; } + + unsigned getSubclassData() const { return SubclassData; } + void setSubclassData(unsigned val) { SubclassData = val; } + + /// ForwardType - This field is used to implement the union find scheme for + /// abstract types. When types are refined to other types, this field is set + /// to the more refined type. Only abstract types can be forwarded. 
+ mutable const Type *ForwardType; + + + /// AbstractTypeUsers - Implement a list of the users that need to be notified + /// if I am a type, and I get resolved into a more concrete type. + /// + mutable std::vector<AbstractTypeUser *> AbstractTypeUsers; + + /// NumContainedTys - Keeps track of how many PATypeHandle instances there + /// are at the end of this type instance for the list of contained types. It + /// is the subclasses responsibility to set this up. Set to 0 if there are no + /// contained types in this type. + unsigned NumContainedTys; + + /// ContainedTys - A pointer to the array of Types (PATypeHandle) contained + /// by this Type. For example, this includes the arguments of a function + /// type, the elements of a structure, the pointee of a pointer, the element + /// type of an array, etc. This pointer may be 0 for types that don't + /// contain other types (Integer, Double, Float). In general, the subclass + /// should arrange for space for the PATypeHandles to be included in the + /// allocation of the type object and set this pointer to the address of the + /// first element. This allows the Type class to manipulate the ContainedTys + /// without understanding the subclass's placement for this array. keeping + /// it here also allows the subtype_* members to be implemented MUCH more + /// efficiently, and dynamically very few types do not contain any elements. + PATypeHandle *ContainedTys; + +public: + void print(raw_ostream &O) const; + void print(std::ostream &O) const; + + /// @brief Debugging support: print to stderr + void dump() const; + + /// @brief Debugging support: print to stderr (use type names from context + /// module). + void dump(const Module *Context) const; + + //===--------------------------------------------------------------------===// + // Property accessors for dealing with types... Some of these virtual methods + // are defined in private classes defined in Type.cpp for primitive types. + // + + /// getTypeID - Return the type id for the type. This will return one + /// of the TypeID enum elements defined above. + /// + inline TypeID getTypeID() const { return ID; } + + /// getDescription - Return the string representation of the type. + std::string getDescription() const; + + /// isInteger - True if this is an instance of IntegerType. + /// + bool isInteger() const { return ID == IntegerTyID; } + + /// isIntOrIntVector - Return true if this is an integer type or a vector of + /// integer types. + /// + bool isIntOrIntVector() const; + + /// isFloatingPoint - Return true if this is one of the two floating point + /// types + bool isFloatingPoint() const { return ID == FloatTyID || ID == DoubleTyID || + ID == X86_FP80TyID || ID == FP128TyID || ID == PPC_FP128TyID; } + + /// isFPOrFPVector - Return true if this is a FP type or a vector of FP types. + /// + bool isFPOrFPVector() const; + + /// isAbstract - True if the type is either an Opaque type, or is a derived + /// type that includes an opaque type somewhere in it. + /// + inline bool isAbstract() const { return Abstract; } + + /// canLosslesslyBitCastTo - Return true if this type could be converted + /// with a lossless BitCast to type 'Ty'. For example, uint to int. BitCasts + /// are valid for types of the same size only where no re-interpretation of + /// the bits is done. 
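  /// (A small illustration, not from this header, using the built-in types
  /// declared further below:
  ///     Type::Int32Ty->canLosslesslyBitCastTo(Type::FloatTy);  // true: 32 bits each
  ///     Type::Int32Ty->canLosslesslyBitCastTo(Type::Int64Ty);  // false: sizes differ
  /// )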
+ /// @brief Determine if this type could be losslessly bitcast to Ty + bool canLosslesslyBitCastTo(const Type *Ty) const; + + + /// Here are some useful little methods to query what type derived types are + /// Note that all other types can just compare to see if this == Type::xxxTy; + /// + inline bool isPrimitiveType() const { return ID <= LastPrimitiveTyID; } + inline bool isDerivedType() const { return ID >= FirstDerivedTyID; } + + /// isFirstClassType - Return true if the type is "first class", meaning it + /// is a valid type for a Value. + /// + inline bool isFirstClassType() const { + // There are more first-class kinds than non-first-class kinds, so a + // negative test is simpler than a positive one. + return ID != FunctionTyID && ID != VoidTyID && ID != OpaqueTyID; + } + + /// isSingleValueType - Return true if the type is a valid type for a + /// virtual register in codegen. This includes all first-class types + /// except struct and array types. + /// + inline bool isSingleValueType() const { + return (ID != VoidTyID && ID <= LastPrimitiveTyID) || + ID == IntegerTyID || ID == PointerTyID || ID == VectorTyID; + } + + /// isAggregateType - Return true if the type is an aggregate type. This + /// means it is valid as the first operand of an insertvalue or + /// extractvalue instruction. This includes struct and array types, but + /// does not include vector types. + /// + inline bool isAggregateType() const { + return ID == StructTyID || ID == ArrayTyID; + } + + /// isSized - Return true if it makes sense to take the size of this type. To + /// get the actual size for a particular target, it is reasonable to use the + /// TargetData subsystem to do this. + /// + bool isSized() const { + // If it's a primitive, it is always sized. + if (ID == IntegerTyID || isFloatingPoint() || ID == PointerTyID) + return true; + // If it is not something that can have a size (e.g. a function or label), + // it doesn't have a size. + if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID) + return false; + // If it is something that can have a size and it's concrete, it definitely + // has a size, otherwise we have to try harder to decide. + return !isAbstract() || isSizedDerivedType(); + } + + /// getPrimitiveSizeInBits - Return the basic size of this type if it is a + /// primitive type. These are fixed by LLVM and are not target dependent. + /// This will return zero if the type does not have a size or is not a + /// primitive type. + /// + unsigned getPrimitiveSizeInBits() const; + + /// getFPMantissaWidth - Return the width of the mantissa of this type. This + /// is only valid on scalar floating point types. If the FP type does not + /// have a stable mantissa (e.g. ppc long double), this method returns -1. + int getFPMantissaWidth() const { + assert(isFloatingPoint() && "Not a floating point type!"); + if (ID == FloatTyID) return 24; + if (ID == DoubleTyID) return 53; + if (ID == X86_FP80TyID) return 64; + if (ID == FP128TyID) return 113; + assert(ID == PPC_FP128TyID && "unknown fp type"); + return -1; + } + + /// getForwardedType - Return the type that this type has been resolved to if + /// it has been resolved to anything. This is used to implement the + /// union-find algorithm for type resolution, and shouldn't be used by general + /// purpose clients. 
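  /// (Illustrative only: a resolution loop would chase the forwarding pointer
  /// until it settles, e.g.
  ///     while (const Type *Fwd = Ty->getForwardedType())
  ///       Ty = Fwd;
  /// which is, in spirit, what PATypeHolder::get() near the end of this file
  /// does for ordinary clients.)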
+ const Type *getForwardedType() const { + if (!ForwardType) return 0; + return getForwardedTypeInternal(); + } + + /// getVAArgsPromotedType - Return the type an argument of this type + /// will be promoted to if passed through a variable argument + /// function. + const Type *getVAArgsPromotedType() const; + + //===--------------------------------------------------------------------===// + // Type Iteration support + // + typedef PATypeHandle *subtype_iterator; + subtype_iterator subtype_begin() const { return ContainedTys; } + subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];} + + /// getContainedType - This method is used to implement the type iterator + /// (defined a the end of the file). For derived types, this returns the + /// types 'contained' in the derived type. + /// + const Type *getContainedType(unsigned i) const { + assert(i < NumContainedTys && "Index out of range!"); + return ContainedTys[i].get(); + } + + /// getNumContainedTypes - Return the number of types in the derived type. + /// + unsigned getNumContainedTypes() const { return NumContainedTys; } + + //===--------------------------------------------------------------------===// + // Static members exported by the Type class itself. Useful for getting + // instances of Type. + // + + /// getPrimitiveType - Return a type based on an identifier. + static const Type *getPrimitiveType(TypeID IDNumber); + + //===--------------------------------------------------------------------===// + // These are the builtin types that are always available... + // + static const Type *VoidTy, *LabelTy, *FloatTy, *DoubleTy, *MetadataTy; + static const Type *X86_FP80Ty, *FP128Ty, *PPC_FP128Ty; + static const IntegerType *Int1Ty, *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty; + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const Type *) { return true; } + + void addRef() const { + assert(isAbstract() && "Cannot add a reference to a non-abstract type!"); + ++RefCount; + } + + void dropRef() const { + assert(isAbstract() && "Cannot drop a reference to a non-abstract type!"); + assert(RefCount && "No objects are currently referencing this object!"); + + // If this is the last PATypeHolder using this object, and there are no + // PATypeHandles using it, the type is dead, delete it now. + if (--RefCount == 0 && AbstractTypeUsers.empty()) + this->destroy(); + } + + /// addAbstractTypeUser - Notify an abstract type that there is a new user of + /// it. This function is called primarily by the PATypeHandle class. + /// + void addAbstractTypeUser(AbstractTypeUser *U) const { + assert(isAbstract() && "addAbstractTypeUser: Current type not abstract!"); + AbstractTypeUsers.push_back(U); + } + + /// removeAbstractTypeUser - Notify an abstract type that a user of the class + /// no longer has a handle to the type. This function is called primarily by + /// the PATypeHandle class. When there are no users of the abstract type, it + /// is annihilated, because there is no way to get a reference to it ever + /// again. + /// + void removeAbstractTypeUser(AbstractTypeUser *U) const; + + /// getPointerTo - Return a pointer to the current type. This is equivalent + /// to PointerType::get(Foo, AddrSpace). + PointerType *getPointerTo(unsigned AddrSpace = 0) const; + +private: + /// isSizedDerivedType - Derived types like structures and arrays are sized + /// iff all of the members of the type are sized as well. 
Since asking for + /// their size is relatively uncommon, move this operation out of line. + bool isSizedDerivedType() const; + + virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + virtual void typeBecameConcrete(const DerivedType *AbsTy); + +protected: + // PromoteAbstractToConcrete - This is an internal method used to calculate + // change "Abstract" from true to false when types are refined. + void PromoteAbstractToConcrete(); + friend class TypeMapBase; +}; + +//===----------------------------------------------------------------------===// +// Define some inline methods for the AbstractTypeUser.h:PATypeHandle class. +// These are defined here because they MUST be inlined, yet are dependent on +// the definition of the Type class. +// +inline void PATypeHandle::addUser() { + assert(Ty && "Type Handle has a null type!"); + if (Ty->isAbstract()) + Ty->addAbstractTypeUser(User); +} +inline void PATypeHandle::removeUser() { + if (Ty->isAbstract()) + Ty->removeAbstractTypeUser(User); +} + +// Define inline methods for PATypeHolder. + +/// get - This implements the forwarding part of the union-find algorithm for +/// abstract types. Before every access to the Type*, we check to see if the +/// type we are pointing to is forwarding to a new type. If so, we drop our +/// reference to the type. +/// +inline Type* PATypeHolder::get() const { + const Type *NewTy = Ty->getForwardedType(); + if (!NewTy) return const_cast<Type*>(Ty); + return *const_cast<PATypeHolder*>(this) = NewTy; +} + +inline void PATypeHolder::addRef() { + assert(Ty && "Type Holder has a null type!"); + if (Ty->isAbstract()) + Ty->addRef(); +} + +inline void PATypeHolder::dropRef() { + if (Ty->isAbstract()) + Ty->dropRef(); +} + + +//===----------------------------------------------------------------------===// +// Provide specializations of GraphTraits to be able to treat a type as a +// graph of sub types... + +template <> struct GraphTraits<Type*> { + typedef Type NodeType; + typedef Type::subtype_iterator ChildIteratorType; + + static inline NodeType *getEntryNode(Type *T) { return T; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->subtype_begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->subtype_end(); + } +}; + +template <> struct GraphTraits<const Type*> { + typedef const Type NodeType; + typedef Type::subtype_iterator ChildIteratorType; + + static inline NodeType *getEntryNode(const Type *T) { return T; } + static inline ChildIteratorType child_begin(NodeType *N) { + return N->subtype_begin(); + } + static inline ChildIteratorType child_end(NodeType *N) { + return N->subtype_end(); + } +}; + +template <> inline bool isa_impl<PointerType, Type>(const Type &Ty) { + return Ty.getTypeID() == Type::PointerTyID; +} + +std::ostream &operator<<(std::ostream &OS, const Type &T); +raw_ostream &operator<<(raw_ostream &OS, const Type &T); + +} // End llvm namespace + +#endif diff --git a/include/llvm/TypeSymbolTable.h b/include/llvm/TypeSymbolTable.h new file mode 100644 index 0000000..e1459b0 --- /dev/null +++ b/include/llvm/TypeSymbolTable.h @@ -0,0 +1,136 @@ +//===-- llvm/TypeSymbolTable.h - Implement a Type Symtab --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file implements the name/type symbol table for LLVM. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TYPE_SYMBOL_TABLE_H +#define LLVM_TYPE_SYMBOL_TABLE_H + +#include "llvm/Type.h" +#include <map> + +namespace llvm { + +/// This class provides a symbol table of name/type pairs with operations to +/// support constructing, searching and iterating over the symbol table. The +/// class derives from AbstractTypeUser so that the contents of the symbol +/// table can be updated when abstract types become concrete. +class TypeSymbolTable : public AbstractTypeUser { + +/// @name Types +/// @{ +public: + + /// @brief A mapping of names to types. + typedef std::map<const std::string, const Type*> TypeMap; + + /// @brief An iterator over the TypeMap. + typedef TypeMap::iterator iterator; + + /// @brief A const_iterator over the TypeMap. + typedef TypeMap::const_iterator const_iterator; + +/// @} +/// @name Constructors +/// @{ +public: + + TypeSymbolTable():LastUnique(0) {} + ~TypeSymbolTable(); + +/// @} +/// @name Accessors +/// @{ +public: + + /// Generates a unique name for a type based on the \p BaseName by + /// incrementing an integer and appending it to the name, if necessary + /// @returns the unique name + /// @brief Get a unique name for a type + std::string getUniqueName(const std::string &BaseName) const; + + /// This method finds the type with the given \p name in the type map + /// and returns it. + /// @returns null if the name is not found, otherwise the Type + /// associated with the \p name. + /// @brief Lookup a type by name. + Type* lookup(const std::string& name) const; + + /// @returns true iff the symbol table is empty. + /// @brief Determine if the symbol table is empty + inline bool empty() const { return tmap.empty(); } + + /// @returns the size of the symbol table + /// @brief The number of name/type pairs is returned. + inline unsigned size() const { return unsigned(tmap.size()); } + + /// This function can be used from the debugger to display the + /// content of the symbol table while debugging. + /// @brief Print out symbol table on stderr + void dump() const; + +/// @} +/// @name Iteration +/// @{ +public: + /// Get an iterator to the start of the symbol table + inline iterator begin() { return tmap.begin(); } + + /// @brief Get a const_iterator to the start of the symbol table + inline const_iterator begin() const { return tmap.begin(); } + + /// Get an iterator to the end of the symbol table. + inline iterator end() { return tmap.end(); } + + /// Get a const_iterator to the end of the symbol table. + inline const_iterator end() const { return tmap.end(); } + +/// @} +/// @name Mutators +/// @{ +public: + + /// Inserts a type into the symbol table with the specified name. There can be + /// a many-to-one mapping between names and types. This method allows a type + /// with an existing entry in the symbol table to get a new name. + /// @brief Insert a type under a new name. + void insert(const std::string &Name, const Type *Typ); + + /// Remove a type at the specified position in the symbol table. + /// @returns the removed Type. + /// @returns the Type that was erased from the symbol table. + Type* remove(iterator TI); + +/// @} +/// @name AbstractTypeUser Methods +/// @{ +private: + /// This function is called when one of the types in the type plane + /// is refined. 
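  /// (For illustration, not part of this header: the public mutators and
  /// accessors above are typically used as
  ///     TST.insert("foo", SomeOpaqueTy);     // name a (possibly abstract) type
  ///     Type *T = TST.lookup("foo");         // later references resolve by name
  /// where SomeOpaqueTy is any Type*; when an abstract entry is refined, the
  /// callbacks below update the stored pointer automatically.)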
+ virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); + + /// This function markes a type as being concrete (defined). + virtual void typeBecameConcrete(const DerivedType *AbsTy); + +/// @} +/// @name Internal Data +/// @{ +private: + TypeMap tmap; ///< This is the mapping of names to types. + mutable uint32_t LastUnique; ///< Counter for tracking unique names + +/// @} + +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Use.h b/include/llvm/Use.h new file mode 100644 index 0000000..489dbc5 --- /dev/null +++ b/include/llvm/Use.h @@ -0,0 +1,237 @@ +//===-- llvm/Use.h - Definition of the Use class ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This defines the Use class. The Use class represents the operand of an +// instruction or some other User instance which refers to a Value. The Use +// class keeps the "use list" of the referenced value up to date. +// +// Pointer tagging is used to efficiently find the User corresponding +// to a Use without having to store a User pointer in every Use. A +// User is preceded in memory by all the Uses corresponding to its +// operands, and the low bits of one of the fields (Prev) of the Use +// class are used to encode offsets to be able to find that User given +// a pointer to any Use. For details, see: +// +// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_USE_H +#define LLVM_USE_H + +#include "llvm/Support/Casting.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/PointerIntPair.h" + +namespace llvm { + +class Value; +class User; +class Use; + +/// Tag - generic tag type for (at least 32 bit) pointers +enum Tag { noTag, tagOne, tagTwo, tagThree }; + +// Use** is only 4-byte aligned. +template<> +class PointerLikeTypeTraits<Use**> { +public: + static inline void *getAsVoidPointer(Use** P) { return P; } + static inline Use **getFromVoidPointer(void *P) { + return static_cast<Use**>(P); + } + enum { NumLowBitsAvailable = 2 }; +}; + +//===----------------------------------------------------------------------===// +// Use Class +//===----------------------------------------------------------------------===// + +/// Use is here to make keeping the "use" list of a Value up-to-date really +/// easy. +class Use { +public: + /// swap - provide a fast substitute to std::swap<Use> + /// that also works with less standard-compliant compilers + void swap(Use &RHS); + +private: + /// Copy ctor - do not implement + Use(const Use &U); + + /// Destructor - Only for zap() + inline ~Use() { + if (Val) removeFromList(); + } + + /// Default ctor - This leaves the Use completely uninitialized. The only + /// thing that is valid to do with this use is to call the "init" method. + inline Use() {} + enum PrevPtrTag { zeroDigitTag = noTag + , oneDigitTag = tagOne + , stopTag = tagTwo + , fullStopTag = tagThree }; + +public: + /// Normally Use will just implicitly convert to a Value* that it holds. + operator Value*() const { return Val; } + + /// If implicit conversion to Value* doesn't work, the get() method returns + /// the Value*. + Value *get() const { return Val; } + + /// getUser - This returns the User that contains this Use. 
For an + /// instruction operand, for example, this will return the instruction. + User *getUser() const; + + inline void set(Value *Val); + + Value *operator=(Value *RHS) { + set(RHS); + return RHS; + } + const Use &operator=(const Use &RHS) { + set(RHS.Val); + return *this; + } + + Value *operator->() { return Val; } + const Value *operator->() const { return Val; } + + Use *getNext() const { return Next; } + + + /// zap - This is used to destroy Use operands when the number of operands of + /// a User changes. + static void zap(Use *Start, const Use *Stop, bool del = false); + + /// getPrefix - Return deletable pointer if appropriate + Use *getPrefix(); +private: + const Use* getImpliedUser() const; + static Use *initTags(Use *Start, Use *Stop, ptrdiff_t Done = 0); + + Value *Val; + Use *Next; + PointerIntPair<Use**, 2, PrevPtrTag> Prev; + + void setPrev(Use **NewPrev) { + Prev.setPointer(NewPrev); + } + void addToList(Use **List) { + Next = *List; + if (Next) Next->setPrev(&Next); + setPrev(List); + *List = this; + } + void removeFromList() { + Use **StrippedPrev = Prev.getPointer(); + *StrippedPrev = Next; + if (Next) Next->setPrev(StrippedPrev); + } + + friend class Value; + friend class User; +}; + +// simplify_type - Allow clients to treat uses just like values when using +// casting operators. +template<> struct simplify_type<Use> { + typedef Value* SimpleType; + static SimpleType getSimplifiedValue(const Use &Val) { + return static_cast<SimpleType>(Val.get()); + } +}; +template<> struct simplify_type<const Use> { + typedef Value* SimpleType; + static SimpleType getSimplifiedValue(const Use &Val) { + return static_cast<SimpleType>(Val.get()); + } +}; + + + +template<typename UserTy> // UserTy == 'User' or 'const User' +class value_use_iterator : public forward_iterator<UserTy*, ptrdiff_t> { + typedef forward_iterator<UserTy*, ptrdiff_t> super; + typedef value_use_iterator<UserTy> _Self; + + Use *U; + explicit value_use_iterator(Use *u) : U(u) {} + friend class Value; +public: + typedef typename super::reference reference; + typedef typename super::pointer pointer; + + value_use_iterator(const _Self &I) : U(I.U) {} + value_use_iterator() {} + + bool operator==(const _Self &x) const { + return U == x.U; + } + bool operator!=(const _Self &x) const { + return !operator==(x); + } + + /// atEnd - return true if this iterator is equal to use_end() on the value. + bool atEnd() const { return U == 0; } + + // Iterator traversal: forward iteration only + _Self &operator++() { // Preincrement + assert(U && "Cannot increment end iterator!"); + U = U->getNext(); + return *this; + } + _Self operator++(int) { // Postincrement + _Self tmp = *this; ++*this; return tmp; + } + + // Retrieve a pointer to the current User. + UserTy *operator*() const { + assert(U && "Cannot dereference end iterator!"); + return U->getUser(); + } + + UserTy *operator->() const { return operator*(); } + + Use &getUse() const { return *U; } + + /// getOperandNo - Return the operand # of this use in its User. 
Defined in + /// User.h + /// + unsigned getOperandNo() const; +}; + + +template<> struct simplify_type<value_use_iterator<User> > { + typedef User* SimpleType; + + static SimpleType getSimplifiedValue(const value_use_iterator<User> &Val) { + return *Val; + } +}; + +template<> struct simplify_type<const value_use_iterator<User> > + : public simplify_type<value_use_iterator<User> > {}; + +template<> struct simplify_type<value_use_iterator<const User> > { + typedef const User* SimpleType; + + static SimpleType getSimplifiedValue(const + value_use_iterator<const User> &Val) { + return *Val; + } +}; + +template<> struct simplify_type<const value_use_iterator<const User> > + : public simplify_type<value_use_iterator<const User> > {}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/User.h b/include/llvm/User.h new file mode 100644 index 0000000..8a24406 --- /dev/null +++ b/include/llvm/User.h @@ -0,0 +1,205 @@ +//===-- llvm/User.h - User class definition ---------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class defines the interface that one who 'use's a Value must implement. +// Each instance of the Value class keeps track of what User's have handles +// to it. +// +// * Instructions are the largest class of User's. +// * Constants may be users of other constants (think arrays and stuff) +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_USER_H +#define LLVM_USER_H + +#include "llvm/Value.h" + +namespace llvm { + +/// OperandTraits - Compile-time customization of +/// operand-related allocators and accessors +/// for use of the User class +template <class> +struct OperandTraits; + +class User; + +/// OperandTraits<User> - specialization to User +template <> +struct OperandTraits<User> { + static inline Use *op_begin(User*); + static inline Use *op_end(User*); + static inline unsigned operands(const User*); + template <class U> + struct Layout { + typedef U overlay; + }; + static inline void *allocate(unsigned); +}; + +class User : public Value { + User(const User &); // Do not implement + void *operator new(size_t); // Do not implement + template <unsigned> + friend struct HungoffOperandTraits; +protected: + /// OperandList - This is a pointer to the array of Uses for this User. + /// For nodes of fixed arity (e.g. a binary operator) this array will live + /// prefixed to some derived class instance. For nodes of resizable variable + /// arity (e.g. PHINodes, SwitchInst etc.), this memory will be dynamically + /// allocated and should be destroyed by the classes' virtual dtor. + Use *OperandList; + + /// NumOperands - The number of values used by this User. 
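  /// (Illustrative sketch, not from this header: clients normally reach these
  /// through the accessors defined below, e.g.
  ///     for (unsigned i = 0, e = U->getNumOperands(); i != e; ++i)
  ///       Value *Op = U->getOperand(i);
  /// both of which simply index into OperandList.)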
+ /// + unsigned NumOperands; + + void *operator new(size_t s, unsigned Us); + void *operator new(size_t s, unsigned Us, bool Prefix); + User(const Type *ty, unsigned vty, Use *OpList, unsigned NumOps) + : Value(ty, vty), OperandList(OpList), NumOperands(NumOps) {} + Use *allocHungoffUses(unsigned) const; + void dropHungoffUses(Use *U) { + if (OperandList == U) { + OperandList = 0; + NumOperands = 0; + } + Use::zap(U, U->getImpliedUser(), true); + } +public: + ~User() { + if ((intptr_t(OperandList) & 1) == 0) + Use::zap(OperandList, OperandList + NumOperands); + } + /// operator delete - free memory allocated for User and Use objects + void operator delete(void *Usr); + /// placement delete - required by std, but never called. + void operator delete(void*, unsigned) { + assert(0 && "Constructor throws?"); + } + /// placement delete - required by std, but never called. + void operator delete(void*, unsigned, bool) { + assert(0 && "Constructor throws?"); + } +protected: + template <int Idx, typename U> static Use &OpFrom(const U *that) { + return Idx < 0 + ? OperandTraits<U>::op_end(const_cast<U*>(that))[Idx] + : OperandTraits<U>::op_begin(const_cast<U*>(that))[Idx]; + } + template <int Idx> Use &Op() { + return OpFrom<Idx>(this); + } + template <int Idx> const Use &Op() const { + return OpFrom<Idx>(this); + } +public: + Value *getOperand(unsigned i) const { + assert(i < NumOperands && "getOperand() out of range!"); + return OperandList[i]; + } + void setOperand(unsigned i, Value *Val) { + assert(i < NumOperands && "setOperand() out of range!"); + assert((!isa<Constant>((const Value*)this) || + isa<GlobalValue>((const Value*)this)) && + "Cannot mutate a constant with setOperand!"); + OperandList[i] = Val; + } + const Use &getOperandUse(unsigned i) const { + assert(i < NumOperands && "getOperand() out of range!"); + return OperandList[i]; + } + Use &getOperandUse(unsigned i) { + assert(i < NumOperands && "getOperand() out of range!"); + return OperandList[i]; + } + + unsigned getNumOperands() const { return NumOperands; } + + // --------------------------------------------------------------------------- + // Operand Iterator interface... + // + typedef Use* op_iterator; + typedef const Use* const_op_iterator; + + inline op_iterator op_begin() { return OperandList; } + inline const_op_iterator op_begin() const { return OperandList; } + inline op_iterator op_end() { return OperandList+NumOperands; } + inline const_op_iterator op_end() const { return OperandList+NumOperands; } + + // dropAllReferences() - This function is in charge of "letting go" of all + // objects that this User refers to. This allows one to + // 'delete' a whole class at a time, even though there may be circular + // references... First all references are dropped, and all use counts go to + // zero. Then everything is deleted for real. Note that no operations are + // valid on an object that has "dropped all references", except operator + // delete. + // + void dropAllReferences() { + for (op_iterator i = op_begin(), e = op_end(); i != e; ++i) + i->set(0); + } + + /// replaceUsesOfWith - Replaces all references to the "From" definition with + /// references to the "To" definition. 
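  /// (For example, purely as a sketch: when a transform has computed a
  /// replacement NewV for OldV within a single instruction I, it can write
  ///     I->replaceUsesOfWith(OldV, NewV);
  /// instead of scanning I's operand list by hand. I, OldV and NewV are
  /// hypothetical names, not part of this header.)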
+ /// + void replaceUsesOfWith(Value *From, Value *To); + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const User *) { return true; } + static inline bool classof(const Value *V) { + return isa<Instruction>(V) || isa<Constant>(V); + } +}; + +inline Use *OperandTraits<User>::op_begin(User *U) { + return U->op_begin(); +} + +inline Use *OperandTraits<User>::op_end(User *U) { + return U->op_end(); +} + +inline unsigned OperandTraits<User>::operands(const User *U) { + return U->getNumOperands(); +} + +template<> struct simplify_type<User::op_iterator> { + typedef Value* SimpleType; + + static SimpleType getSimplifiedValue(const User::op_iterator &Val) { + return static_cast<SimpleType>(Val->get()); + } +}; + +template<> struct simplify_type<const User::op_iterator> + : public simplify_type<User::op_iterator> {}; + +template<> struct simplify_type<User::const_op_iterator> { + typedef Value* SimpleType; + + static SimpleType getSimplifiedValue(const User::const_op_iterator &Val) { + return static_cast<SimpleType>(Val->get()); + } +}; + +template<> struct simplify_type<const User::const_op_iterator> + : public simplify_type<User::const_op_iterator> {}; + + +// value_use_iterator::getOperandNo - Requires the definition of the User class. +template<typename UserTy> +unsigned value_use_iterator<UserTy>::getOperandNo() const { + return U - U->getUser()->op_begin(); +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Value.h b/include/llvm/Value.h new file mode 100644 index 0000000..a38d8cb --- /dev/null +++ b/include/llvm/Value.h @@ -0,0 +1,332 @@ +//===-- llvm/Value.h - Definition of the Value class ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the Value class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_VALUE_H +#define LLVM_VALUE_H + +#include "llvm/AbstractTypeUser.h" +#include "llvm/Use.h" +#include "llvm/Support/Casting.h" +#include <iosfwd> +#include <string> + +namespace llvm { + +class Constant; +class Argument; +class Instruction; +class BasicBlock; +class GlobalValue; +class Function; +class GlobalVariable; +class GlobalAlias; +class InlineAsm; +class ValueSymbolTable; +class TypeSymbolTable; +template<typename ValueTy> class StringMapEntry; +template <typename ValueTy = Value> +class AssertingVH; +typedef StringMapEntry<Value*> ValueName; +class raw_ostream; +class AssemblyAnnotationWriter; +class ValueHandleBase; + +//===----------------------------------------------------------------------===// +// Value Class +//===----------------------------------------------------------------------===// + +/// This is a very important LLVM class. It is the base class of all values +/// computed by a program that may be used as operands to other values. Value is +/// the super class of other important classes such as Instruction and Function. +/// All Values have a Type. Type is not a subclass of Value. All types can have +/// a name and they should belong to some Module. Setting the name on the Value +/// automatically updates the module's symbol table. +/// +/// Every value has a "use list" that keeps track of which other Values are +/// using this Value. 
A Value can also have an arbitrary number of ValueHandle +/// objects that watch it and listen to RAUW and Destroy events see +/// llvm/Support/ValueHandle.h for details. +/// +/// @brief LLVM Value Representation +class Value { + const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast) + unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this? +protected: + /// SubclassData - This member is defined by this class, but is not used for + /// anything. Subclasses can use it to hold whatever state they find useful. + /// This field is initialized to zero by the ctor. + unsigned short SubclassData; +private: + PATypeHolder VTy; + Use *UseList; + + friend class ValueSymbolTable; // Allow ValueSymbolTable to directly mod Name. + friend class SymbolTable; // Allow SymbolTable to directly poke Name. + friend class ValueHandleBase; + ValueName *Name; + + void operator=(const Value &); // Do not implement + Value(const Value &); // Do not implement + +public: + Value(const Type *Ty, unsigned scid); + virtual ~Value(); + + /// dump - Support for debugging, callable in GDB: V->dump() + // + virtual void dump() const; + + /// print - Implement operator<< on Value. + /// + void print(std::ostream &O, AssemblyAnnotationWriter *AAW = 0) const; + void print(raw_ostream &O, AssemblyAnnotationWriter *AAW = 0) const; + + /// All values are typed, get the type of this value. + /// + inline const Type *getType() const { return VTy; } + + // All values can potentially be named... + inline bool hasName() const { return Name != 0; } + ValueName *getValueName() const { return Name; } + + /// getNameStart - Return a pointer to a null terminated string for this name. + /// Note that names can have null characters within the string as well as at + /// their end. This always returns a non-null pointer. + const char *getNameStart() const; + /// getNameEnd - Return a pointer to the end of the name. + const char *getNameEnd() const { return getNameStart() + getNameLen(); } + + /// isName - Return true if this value has the name specified by the provided + /// nul terminated string. + bool isName(const char *N) const; + + /// getNameLen - Return the length of the string, correctly handling nul + /// characters embedded into them. + unsigned getNameLen() const; + + /// getName()/getNameStr() - Return the name of the specified value, + /// *constructing a string* to hold it. Because these are guaranteed to + /// construct a string, they are very expensive and should be avoided. + std::string getName() const { return getNameStr(); } + std::string getNameStr() const; + + + void setName(const std::string &name); + void setName(const char *Name, unsigned NameLen); + void setName(const char *Name); // Takes a null-terminated string. + + + /// takeName - transfer the name from V to this value, setting V's name to + /// empty. It is an error to call V->takeName(V). + void takeName(Value *V); + + /// replaceAllUsesWith - Go through the uses list for this definition and make + /// each use point to "V" instead of "this". After this completes, 'this's + /// use list is guaranteed to be empty. + /// + void replaceAllUsesWith(Value *V); + + // uncheckedReplaceAllUsesWith - Just like replaceAllUsesWith but dangerous. + // Only use when in type resolution situations! + void uncheckedReplaceAllUsesWith(Value *V); + + //---------------------------------------------------------------------- + // Methods for handling the chain of uses of this Value. 
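  // (Illustrative sketch, not part of this header: with the typedefs below,
  // the usual walk over everything referring to a Value *V is
  //     for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
  //          UI != E; ++UI)
  //       User *U = *UI;   // an Instruction, Constant, etc. that uses V
  // )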
+ // + typedef value_use_iterator<User> use_iterator; + typedef value_use_iterator<const User> use_const_iterator; + + bool use_empty() const { return UseList == 0; } + use_iterator use_begin() { return use_iterator(UseList); } + use_const_iterator use_begin() const { return use_const_iterator(UseList); } + use_iterator use_end() { return use_iterator(0); } + use_const_iterator use_end() const { return use_const_iterator(0); } + User *use_back() { return *use_begin(); } + const User *use_back() const { return *use_begin(); } + + /// hasOneUse - Return true if there is exactly one user of this value. This + /// is specialized because it is a common request and does not require + /// traversing the whole use list. + /// + bool hasOneUse() const { + use_const_iterator I = use_begin(), E = use_end(); + if (I == E) return false; + return ++I == E; + } + + /// hasNUses - Return true if this Value has exactly N users. + /// + bool hasNUses(unsigned N) const; + + /// hasNUsesOrMore - Return true if this value has N users or more. This is + /// logically equivalent to getNumUses() >= N. + /// + bool hasNUsesOrMore(unsigned N) const; + + bool isUsedInBasicBlock(const BasicBlock *BB) const; + + /// getNumUses - This method computes the number of uses of this Value. This + /// is a linear time operation. Use hasOneUse, hasNUses, or hasMoreThanNUses + /// to check for specific values. + unsigned getNumUses() const; + + /// addUse - This method should only be used by the Use class. + /// + void addUse(Use &U) { U.addToList(&UseList); } + + /// An enumeration for keeping track of the concrete subclass of Value that + /// is actually instantiated. Values of this enumeration are kept in the + /// Value classes SubclassID field. They are used for concrete type + /// identification. + enum ValueTy { + ArgumentVal, // This is an instance of Argument + BasicBlockVal, // This is an instance of BasicBlock + FunctionVal, // This is an instance of Function + GlobalAliasVal, // This is an instance of GlobalAlias + GlobalVariableVal, // This is an instance of GlobalVariable + UndefValueVal, // This is an instance of UndefValue + ConstantExprVal, // This is an instance of ConstantExpr + ConstantAggregateZeroVal, // This is an instance of ConstantAggregateNull + ConstantIntVal, // This is an instance of ConstantInt + ConstantFPVal, // This is an instance of ConstantFP + ConstantArrayVal, // This is an instance of ConstantArray + ConstantStructVal, // This is an instance of ConstantStruct + ConstantVectorVal, // This is an instance of ConstantVector + ConstantPointerNullVal, // This is an instance of ConstantPointerNull + MDStringVal, // This is an instance of MDString + MDNodeVal, // This is an instance of MDNode + InlineAsmVal, // This is an instance of InlineAsm + PseudoSourceValueVal, // This is an instance of PseudoSourceValue + InstructionVal, // This is an instance of Instruction + + // Markers: + ConstantFirstVal = FunctionVal, + ConstantLastVal = MDNodeVal + }; + + /// getValueID - Return an ID for the concrete type of this object. This is + /// used to implement the classof checks. This should not be used for any + /// other purpose, as the values may change as LLVM evolves. Also, note that + /// for instructions, the Instruction's opcode is added to InstructionVal. So + /// this means three things: + /// # there is no value with code InstructionVal (no opcode==0). + /// # there are more possible values for the value type than in ValueTy enum. 
+ /// # the InstructionVal enumerator must be the highest valued enumerator in + /// the ValueTy enum. + unsigned getValueID() const { + return SubclassID; + } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const Value *) { + return true; // Values are always values. + } + + /// getRawType - This should only be used to implement the vmcore library. + /// + const Type *getRawType() const { return VTy.getRawType(); } + + /// stripPointerCasts - This method strips off any unneeded pointer + /// casts from the specified value, returning the original uncasted value. + /// Note that the returned value has pointer type if the specified value does. + Value *stripPointerCasts(); + const Value *stripPointerCasts() const { + return const_cast<Value*>(this)->stripPointerCasts(); + } + + /// getUnderlyingObject - This method strips off any GEP address adjustments + /// and pointer casts from the specified value, returning the original object + /// being addressed. Note that the returned value has pointer type if the + /// specified value does. + Value *getUnderlyingObject(); + const Value *getUnderlyingObject() const { + return const_cast<Value*>(this)->getUnderlyingObject(); + } + + /// DoPHITranslation - If this value is a PHI node with CurBB as its parent, + /// return the value in the PHI node corresponding to PredBB. If not, return + /// ourself. This is useful if you want to know the value something has in a + /// predecessor block. + Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB); + + const Value *DoPHITranslation(const BasicBlock *CurBB, + const BasicBlock *PredBB) const{ + return const_cast<Value*>(this)->DoPHITranslation(CurBB, PredBB); + } +}; + +inline std::ostream &operator<<(std::ostream &OS, const Value &V) { + V.print(OS); + return OS; +} +inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) { + V.print(OS); + return OS; +} + +void Use::set(Value *V) { + if (Val) removeFromList(); + Val = V; + if (V) V->addUse(*this); +} + + +// isa - Provide some specializations of isa so that we don't have to include +// the subtype header files to test to see if the value is a subclass... +// +template <> inline bool isa_impl<Constant, Value>(const Value &Val) { + return Val.getValueID() >= Value::ConstantFirstVal && + Val.getValueID() <= Value::ConstantLastVal; +} +template <> inline bool isa_impl<Argument, Value>(const Value &Val) { + return Val.getValueID() == Value::ArgumentVal; +} +template <> inline bool isa_impl<InlineAsm, Value>(const Value &Val) { + return Val.getValueID() == Value::InlineAsmVal; +} +template <> inline bool isa_impl<Instruction, Value>(const Value &Val) { + return Val.getValueID() >= Value::InstructionVal; +} +template <> inline bool isa_impl<BasicBlock, Value>(const Value &Val) { + return Val.getValueID() == Value::BasicBlockVal; +} +template <> inline bool isa_impl<Function, Value>(const Value &Val) { + return Val.getValueID() == Value::FunctionVal; +} +template <> inline bool isa_impl<GlobalVariable, Value>(const Value &Val) { + return Val.getValueID() == Value::GlobalVariableVal; +} +template <> inline bool isa_impl<GlobalAlias, Value>(const Value &Val) { + return Val.getValueID() == Value::GlobalAliasVal; +} +template <> inline bool isa_impl<GlobalValue, Value>(const Value &Val) { + return isa<GlobalVariable>(Val) || isa<Function>(Val) || + isa<GlobalAlias>(Val); +} + + +// Value* is only 4-byte aligned. 
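/*--- Editorial aside (not part of Value.h): a sketch exercising the isa<>
     specializations and helpers declared above. The function name and its
     argument are hypothetical; only members and templates declared in this
     header are used, plus isa<> from llvm/Support/Casting.h. ---*/

bool isSingleUseConstantBehindCasts(llvm::Value *V) {
  // Strip no-op pointer casts; the result still has pointer type if V does.
  llvm::Value *Stripped = V->stripPointerCasts();

  // isa<Constant> resolves to a range check on getValueID() against the
  // ConstantFirstVal/ConstantLastVal markers of the ValueTy enum.
  if (!llvm::isa<llvm::Constant>(Stripped))
    return false;

  // hasOneUse() inspects only the first two use-list entries, avoiding the
  // linear-time walk that getNumUses() performs.
  return Stripped->hasOneUse();
}

/*--- end editorial aside; the PointerLikeTypeTraits specialization below
     relies on the alignment guarantee just stated. ---*/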
+template<> +class PointerLikeTypeTraits<Value*> { + typedef Value* PT; +public: + static inline void *getAsVoidPointer(PT P) { return P; } + static inline PT getFromVoidPointer(void *P) { + return static_cast<PT>(P); + } + enum { NumLowBitsAvailable = 2 }; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/ValueSymbolTable.h b/include/llvm/ValueSymbolTable.h new file mode 100644 index 0000000..752dd2f --- /dev/null +++ b/include/llvm/ValueSymbolTable.h @@ -0,0 +1,132 @@ +//===-- llvm/ValueSymbolTable.h - Implement a Value Symtab ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the name/Value symbol table for LLVM. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_VALUE_SYMBOL_TABLE_H +#define LLVM_VALUE_SYMBOL_TABLE_H + +#include "llvm/Value.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + template<typename ValueSubClass, typename ItemParentClass> + class SymbolTableListTraits; + class BasicBlock; + class Function; + class Module; + +/// This class provides a symbol table of name/value pairs. It is essentially +/// a std::map<std::string,Value*> but has a controlled interface provided by +/// LLVM as well as ensuring uniqueness of names. +/// +class ValueSymbolTable { + friend class Value; + friend class SymbolTableListTraits<Argument, Function>; + friend class SymbolTableListTraits<BasicBlock, Function>; + friend class SymbolTableListTraits<Instruction, BasicBlock>; + friend class SymbolTableListTraits<Function, Module>; + friend class SymbolTableListTraits<GlobalVariable, Module>; + friend class SymbolTableListTraits<GlobalAlias, Module>; +/// @name Types +/// @{ +public: + /// @brief A mapping of names to values. + typedef StringMap<Value*> ValueMap; + + /// @brief An iterator over a ValueMap. + typedef ValueMap::iterator iterator; + + /// @brief A const_iterator over a ValueMap. + typedef ValueMap::const_iterator const_iterator; + +/// @} +/// @name Constructors +/// @{ +public: + + ValueSymbolTable() : vmap(0), LastUnique(0) {} + ~ValueSymbolTable(); + +/// @} +/// @name Accessors +/// @{ +public: + + /// This method finds the value with the given \p name in the + /// the symbol table. + /// @returns the value associated with the \p name + /// @brief Lookup a named Value. + Value *lookup(const std::string &name) const; + Value *lookup(const char *NameBegin, const char *NameEnd) const; + + /// @returns true iff the symbol table is empty + /// @brief Determine if the symbol table is empty + inline bool empty() const { return vmap.empty(); } + + /// @brief The number of name/type pairs is returned. + inline unsigned size() const { return unsigned(vmap.size()); } + + /// This function can be used from the debugger to display the + /// content of the symbol table while debugging. + /// @brief Print out symbol table on stderr + void dump() const; + +/// @} +/// @name Iteration +/// @{ +public: + /// @brief Get an iterator that from the beginning of the symbol table. + inline iterator begin() { return vmap.begin(); } + + /// @brief Get a const_iterator that from the beginning of the symbol table. + inline const_iterator begin() const { return vmap.begin(); } + + /// @brief Get an iterator to the end of the symbol table. 
+ inline iterator end() { return vmap.end(); } + + /// @brief Get a const_iterator to the end of the symbol table. + inline const_iterator end() const { return vmap.end(); } + +/// @} +/// @name Mutators +/// @{ +private: + /// This method adds the provided value \p V to the symbol table. The Value + /// must have a name which is used to place the value in the symbol table. + /// If the inserted name conflicts, this renames the value. + /// @brief Add a named value to the symbol table + void reinsertValue(Value *V); + + /// createValueName - This method attempts to create a value name and insert + /// it into the symbol table with the specified name. If it conflicts, it + /// auto-renames the name and returns that instead. + ValueName *createValueName(const char *NameStart, unsigned NameLen, Value *V); + + /// This method removes a value from the symbol table. It leaves the + /// ValueName attached to the value, but it is no longer inserted in the + /// symtab. + void removeValueName(ValueName *V); + +/// @} +/// @name Internal Data +/// @{ +private: + ValueMap vmap; ///< The map that holds the symbol table. + mutable uint32_t LastUnique; ///< Counter for tracking unique names + +/// @} +}; + +} // End llvm namespace + +#endif
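Editorial note: for readers poking at these interfaces from a debugger or a small tool, here is a minimal sketch of how the public ValueSymbolTable accessors above fit together. The table reference is assumed to come from elsewhere (for example a Function's or Module's symbol table), the function name is hypothetical, and nothing beyond the accessors declared above is used.

#include "llvm/ValueSymbolTable.h"
#include <string>

using namespace llvm;

// Look a value up by name, trying both lookup overloads declared above.
Value *findByName(const ValueSymbolTable &ST, const std::string &Name) {
  if (ST.empty())
    return 0;                                // nothing to find in an empty table

  // std::string overload.
  if (Value *V = ST.lookup(Name))
    return V;

  // Equivalent pointer-range overload; also usable for names that contain
  // embedded nul characters.
  const char *B = Name.data();
  return ST.lookup(B, B + Name.size());
}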
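The private mutators above only hint at how name collisions are resolved, so the following standalone toy models the idea of a LastUnique-style counter appending numeric suffixes until a name is free. It illustrates the behavior described in the createValueName comment; it is not LLVM's implementation, and it uses a plain std::map instead of StringMap.

#include <map>
#include <sstream>
#include <string>

// Toy model: insert Name into Table, appending ++LastUnique until the name
// no longer collides, and return the name actually used.
std::string insertUnique(std::map<std::string, int> &Table,
                         const std::string &Name, unsigned &LastUnique) {
  std::string Candidate = Name;
  while (Table.count(Candidate)) {
    std::ostringstream OS;
    OS << Name << ++LastUnique;       // e.g. "tmp" -> "tmp1" -> "tmp2" ...
    Candidate = OS.str();
  }
  Table[Candidate] = 1;               // placeholder payload; LLVM stores a Value*
  return Candidate;
}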